hip_filename (string lengths 5-84) | hip_content (string lengths 79-9.69M) | cuda_filename (string lengths 4-83) | cuda_content (string lengths 19-9.69M) |
---|---|---|---|
9b80abb3856c0a90b85a0efcdfafe055e1f28148.hip | // !!! This is a file automatically generated by hipify!!!
//headers
#include <stdio.h>
#include <hip/hip_runtime.h> //standard HIP runtime header file
#include "helper_timer.h" //header for time calculation
//global variables
int iNumberOfArrayElements = 11444777; //from Nvidia OpenCL samples
float *hostInput1 = NULL;
float *hostInput2 = NULL;
float *hostOutput = NULL;
float *gold = NULL;
float *deviceInput1 = NULL;
float *deviceInput2 = NULL;
float *deviceOutput = NULL;
float timeOnCPU;
float timeOnGPU;
// *** CUDA KERNEL DEFINITION ***
//global kernel function definition
__global__ void vecAdd(float *in1, float *in2, float *out, int len)
{
//variable declaration
int i = blockIdx.x * blockDim.x + threadIdx.x;
//code
if(i < len)
{
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char *argv[])
{
//function declaration
void fillFloatArrayWithRandomNumbers(float *pFloatArray, int iSize);
void vecAddHost(const float *in1, const float *in2, float *out, int len);
void cleanup();
//code
//allocate host-memory
hostInput1 = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(hostInput1 == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 1.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostInput2 = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(hostInput2 == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 2.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostOutput = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(hostOutput == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Output Array.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
gold = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(gold == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Gold Output Array.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
//fill above input host vectors with arbitrary random data
fillFloatArrayWithRandomNumbers(hostInput1, iNumberOfArrayElements);
fillFloatArrayWithRandomNumbers(hostInput2, iNumberOfArrayElements);
//allocate device-memory
hipError_t err = hipSuccess;
err = hipMalloc((void **)&deviceInput1, sizeof(float) * iNumberOfArrayElements);
if(err != hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", hipGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&deviceInput2, sizeof(float) * iNumberOfArrayElements);
if(err != hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", hipGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&deviceOutput, sizeof(float) * iNumberOfArrayElements);
if(err != hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", hipGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//copy host memory contents to device memory
err = hipMemcpy(deviceInput1, hostInput1, sizeof(float) * iNumberOfArrayElements, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", hipGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = hipMemcpy(deviceInput2, hostInput2, sizeof(float) * iNumberOfArrayElements, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", hipGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//cuda kernel configuration
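//one thread per element: 256 threads per block, with ceil(iNumberOfArrayElements / 256.0) blocks to cover the whole array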
dim3 DimGrid = dim3(ceil(iNumberOfArrayElements / 256.0), 1, 1);
dim3 DimBlock = dim3(256, 1, 1);
//start timer
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInput1, deviceInput2, deviceOutput, iNumberOfArrayElements);
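//note: the kernel launch is asynchronous, so without a device synchronize before stopping the timer, timeOnGPU mostly reflects launch overhead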
//stop timer
sdkStopTimer(&timer);
timeOnGPU = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
timer = NULL;
//copy device memory to host memory
err = hipMemcpy(hostOutput, deviceOutput, sizeof(float) * iNumberOfArrayElements, hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", hipGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//results
vecAddHost(hostInput1, hostInput2, gold, iNumberOfArrayElements);
//compare results for golden-host
const float epsilon = 0.000001f;
bool bAccuracy = true;
int breakValue = 0;
int i;
for(i = 0; i < iNumberOfArrayElements; i++)
{
float val1 = gold[i];
float val2 = hostOutput[i];
if(fabs(val1 - val2) > epsilon)
{
bAccuracy = false;
breakValue = i;
break;
}
}
if(bAccuracy == false)
{
printf("Break Value = %d\n", breakValue);
}
char str[125];
if(bAccuracy == true)
sprintf(str, "%s", "Comparison Of Output Arrays On CPU And GPU Are Accurate Within The Limit Of 0.000001");
else
sprintf(str, "%s", "Not All Comparison Of Output Arrays On CPU And GPU Are Accurate Within The Limit Of 0.000001");
printf("1st Array Is From 0th Element %.6f to %dth Element %.6f\n", hostInput1[0], iNumberOfArrayElements - 1, hostInput1[iNumberOfArrayElements - 1]);
printf("2nd Array Is From 0th Element %.6f to %dth Element %.6f\n", hostInput2[0], iNumberOfArrayElements - 1, hostInput2[iNumberOfArrayElements - 1]);
printf("Grid Dimension = (%d, 1, 1) And Block Dimension = (%d, 1, 1)\n", DimGrid.x, DimBlock.x);
printf("Sum Of Each Element From Above 2 Arrays Creates 3rd Array As : \n");
printf("2nd Array Is From 0th Element %.6f to %dth Element %.6f\n", hostOutput[0], iNumberOfArrayElements - 1, hostOutput[iNumberOfArrayElements - 1]);
printf("The Time Taken To Do Above Addition On CPU = %.6f (ms)\n", timeOnCPU);
printf("The Time Taken To Do Above Addition On GPU = %.6f (ms)\n", timeOnGPU);
//total cleanup
cleanup();
return (0);
}
void cleanup(void)
{
//code
//free allocated device memory
if(deviceOutput)
{
hipFree(deviceOutput);
deviceOutput = NULL;
}
if(deviceInput2)
{
hipFree(deviceInput2);
deviceInput2 = NULL;
}
if(deviceInput1)
{
hipFree(deviceInput1);
deviceInput1 = NULL;
}
//free allocated host memory
if(gold)
{
free(gold);
gold = NULL;
}
if(hostOutput)
{
free(hostOutput);
hostOutput = NULL;
}
if(hostInput2)
{
free(hostInput2);
hostInput2 = NULL;
}
if(hostInput1)
{
free(hostInput1);
hostInput1 = NULL;
}
}
void fillFloatArrayWithRandomNumbers(float *pFloatArray, int iSize)
{
//code
int i;
const float fScale = 1.0f / (float)RAND_MAX;
for(i = 0; i < iSize; i++)
{
pFloatArray[i] = fScale * rand();
}
}
//"Golden" Host processing vector addition function for comparison purpose
void vecAddHost(const float *pFloatData1, const float *pFloatData2, float *pFloatResult, int iNumElements)
{
//code
int i;
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
for(i = 0; i < iNumElements; i++)
{
pFloatResult[i] = pFloatData1[i] + pFloatData2[i];
}
sdkStopTimer(&timer);
timeOnCPU = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
timer = NULL;
}
| 9b80abb3856c0a90b85a0efcdfafe055e1f28148.cu | //headers
#include <stdio.h>
#include <cuda.h> //standard cuda header file
#include "helper_timer.h" //header for time calculation
//global variables
int iNumberOfArrayElements = 11444777; //from Nvidia OpenCL samples
float *hostInput1 = NULL;
float *hostInput2 = NULL;
float *hostOutput = NULL;
float *gold = NULL;
float *deviceInput1 = NULL;
float *deviceInput2 = NULL;
float *deviceOutput = NULL;
float timeOnCPU;
float timeOnGPU;
// *** CUDA KERNEL DEFINITION ***
//global kernel function definition
__global__ void vecAdd(float *in1, float *in2, float *out, int len)
{
//variable declaration
int i = blockIdx.x * blockDim.x + threadIdx.x;
//code
if(i < len)
{
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char *argv[])
{
//function declaration
void fillFloatArrayWithRandomNumbers(float *pFloatArray, int iSize);
void vecAddHost(const float *in1, const float *in2, float *out, int len);
void cleanup();
//code
//allocate host-memory
hostInput1 = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(hostInput1 == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 1.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostInput2 = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(hostInput2 == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 2.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostOutput = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(hostOutput == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Output Array.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
gold = (float *)malloc(sizeof(float) * iNumberOfArrayElements);
if(gold == NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Gold Output Array.\nExiting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
//fill above input host vectors with arbitrary random data
fillFloatArrayWithRandomNumbers(hostInput1, iNumberOfArrayElements);
fillFloatArrayWithRandomNumbers(hostInput2, iNumberOfArrayElements);
//allocate device-memory
cudaError_t err = cudaSuccess;
err = cudaMalloc((void **)&deviceInput1, sizeof(float) * iNumberOfArrayElements);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&deviceInput2, sizeof(float) * iNumberOfArrayElements);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&deviceOutput, sizeof(float) * iNumberOfArrayElements);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//copy host memory contents to device memory
err = cudaMemcpy(deviceInput1, hostInput1, sizeof(float) * iNumberOfArrayElements, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMemcpy(deviceInput2, hostInput2, sizeof(float) * iNumberOfArrayElements, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//cuda kernel configuration
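//one thread per element: 256 threads per block, with ceil(iNumberOfArrayElements / 256.0) blocks to cover the whole array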
dim3 DimGrid = dim3(ceil(iNumberOfArrayElements / 256.0), 1, 1);
dim3 DimBlock = dim3(256, 1, 1);
//start timer
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, iNumberOfArrayElements);
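//note: the kernel launch is asynchronous, so without a device synchronize before stopping the timer, timeOnGPU mostly reflects launch overhead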
//stop timer
sdkStopTimer(&timer);
timeOnGPU = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
timer = NULL;
//copy device memory to host memory
err = cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * iNumberOfArrayElements, cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//results
vecAddHost(hostInput1, hostInput2, gold, iNumberOfArrayElements);
//compare results for golden-host
const float epsilon = 0.000001f;
bool bAccuracy = true;
int breakValue = 0;
int i;
for(i = 0; i < iNumberOfArrayElements; i++)
{
float val1 = gold[i];
float val2 = hostOutput[i];
if(fabs(val1 - val2) > epsilon)
{
bAccuracy = false;
breakValue = i;
break;
}
}
if(bAccuracy == false)
{
printf("Break Value = %d\n", breakValue);
}
char str[125];
if(bAccuracy == true)
sprintf(str, "%s", "Comparison Of Output Arrays On CPU And GPU Are Accurate Within The Limit Of 0.000001");
else
sprintf(str, "%s", "Not All Comparison Of Output Arrays On CPU And GPU Are Accurate Within The Limit Of 0.000001");
printf("1st Array Is From 0th Element %.6f to %dth Element %.6f\n", hostInput1[0], iNumberOfArrayElements - 1, hostInput1[iNumberOfArrayElements - 1]);
printf("2nd Array Is From 0th Element %.6f to %dth Element %.6f\n", hostInput2[0], iNumberOfArrayElements - 1, hostInput2[iNumberOfArrayElements - 1]);
printf("Grid Dimension = (%d, 1, 1) And Block Dimension = (%d, 1, 1)\n", DimGrid.x, DimBlock.x);
printf("Sum Of Each Element From Above 2 Arrays Creates 3rd Array As : \n");
printf("2nd Array Is From 0th Element %.6f to %dth Element %.6f\n", hostOutput[0], iNumberOfArrayElements - 1, hostOutput[iNumberOfArrayElements - 1]);
printf("The Time Taken To Do Above Addition On CPU = %.6f (ms)\n", timeOnCPU);
printf("The Time Taken To Do Above Addition On GPU = %.6f (ms)\n", timeOnGPU);
//total cleanup
cleanup();
return (0);
}
void cleanup(void)
{
//code
//free allocated device memory
if(deviceOutput)
{
cudaFree(deviceOutput);
deviceOutput = NULL;
}
if(deviceInput2)
{
cudaFree(deviceInput2);
deviceInput2 = NULL;
}
if(deviceInput1)
{
cudaFree(deviceInput1);
deviceInput1 = NULL;
}
//free allocated host memory
if(gold)
{
free(gold);
gold = NULL;
}
if(hostOutput)
{
free(hostOutput);
hostOutput = NULL;
}
if(hostInput2)
{
free(hostInput2);
hostInput2 = NULL;
}
if(hostInput1)
{
free(hostInput1);
hostInput1 = NULL;
}
}
void fillFloatArrayWithRandomNumbers(float *pFloatArray, int iSize)
{
//code
int i;
const float fScale = 1.0f / (float)RAND_MAX;
for(i = 0; i < iSize; i++)
{
pFloatArray[i] = fScale * rand();
}
}
//"Golden" Host processing vector addition function for comparison purpose
void vecAddHost(const float *pFloatData1, const float *pFloatData2, float *pFloatResult, int iNumElements)
{
//code
int i;
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
for(i = 0; i < iNumElements; i++)
{
pFloatResult[i] = pFloatData1[i] + pFloatData2[i];
}
sdkStopTimer(&timer);
timeOnCPU = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
timer = NULL;
}
|
228acd4df76cef9deed6d3f6157c1b54f3c77324.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/densegrid.inl"
#include "cupoch/geometry/geometry_functor.h"
#include "cupoch/geometry/intersection_test.h"
#include "cupoch/geometry/occupancygrid.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/eigen.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace geometry {
namespace {
__constant__ float voxel_offset[7][3] = {{0, 0, 0}, {1, 0, 0}, {-1, 0, 0},
{0, 1, 0}, {0, -1, 0}, {0, 0, 1},
{0, 0, -1}};
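// the 7 offsets select the voxel containing the sample point plus its 6 face neighbours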
struct extract_range_voxels_functor {
extract_range_voxels_functor(const Eigen::Vector3i& extents,
int resolution,
const Eigen::Vector3i& min_bound)
: extents_(extents),
resolution_(resolution),
min_bound_(min_bound){};
const Eigen::Vector3i extents_;
const int resolution_;
const Eigen::Vector3i min_bound_;
__device__ int operator()(size_t idx) const {
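// unflatten idx over the (x, y, z) extents of the selected range, then shift by min_bound_ to get the index into the full grid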
int x = idx / (extents_[1] * extents_[2]);
int yz = idx % (extents_[1] * extents_[2]);
int y = yz / extents_[2];
int z = yz % extents_[2];
Eigen::Vector3i gidx = min_bound_ + Eigen::Vector3i(x, y, z);
return IndexOf(gidx, resolution_);
}
};
struct compute_intersect_voxel_segment_functor {
compute_intersect_voxel_segment_functor(
const Eigen::Vector3f* points,
const Eigen::Vector3f* steps,
const Eigen::Vector3f& viewpoint,
const Eigen::Vector3i& half_resolution,
float voxel_size,
const Eigen::Vector3f& origin,
int n_div)
: points_(points),
steps_(steps),
viewpoint_(viewpoint),
half_resolution_(half_resolution),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
n_div_(n_div){};
const Eigen::Vector3f* points_;
const Eigen::Vector3f* steps_;
const Eigen::Vector3f viewpoint_;
const Eigen::Vector3i half_resolution_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const int n_div_;
__device__ Eigen::Vector3i operator()(size_t idx) {
int pidx = idx / (n_div_ * 7);
int svidx = idx % (n_div_ * 7);
int sidx = svidx / 7;
int vidx = svidx % 7;
Eigen::Vector3f center = sidx * steps_[pidx] + viewpoint_;
Eigen::Vector3f voxel_idx = Eigen::device_vectorize<float, 3, ::floor>(
(center - origin_) / voxel_size_);
Eigen::Vector3f voxel_center =
voxel_size_ *
(voxel_idx + Eigen::Vector3f(voxel_offset[vidx][0],
voxel_offset[vidx][1],
voxel_offset[vidx][2]));
bool is_intersect = intersection_test::LineSegmentAABB(
viewpoint_, points_[pidx], voxel_center - box_half_size_,
voxel_center + box_half_size_);
return (is_intersect) ? voxel_idx.cast<int>() + half_resolution_
: Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX);
}
};
void ComputeFreeVoxels(const utility::device_vector<Eigen::Vector3f>& points,
const Eigen::Vector3f& viewpoint,
float voxel_size,
int resolution,
Eigen::Vector3f& origin,
const utility::device_vector<Eigen::Vector3f>& steps,
int n_div,
utility::device_vector<Eigen::Vector3i>& free_voxels) {
if (points.empty()) return;
size_t n_points = points.size();
size_t max_idx = resolution * resolution * resolution;
Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2);
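// one candidate voxel per (ray sample, neighbour offset) pair: n_div samples x 7 offsets for each input point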
free_voxels.resize(n_div * n_points * 7);
compute_intersect_voxel_segment_functor func(
thrust::raw_pointer_cast(points.data()),
thrust::raw_pointer_cast(steps.data()), viewpoint, half_resolution,
voxel_size, origin, n_div);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_div * n_points * 7),
free_voxels.begin(), func);
auto end1 = thrust::remove_if(
free_voxels.begin(), free_voxels.end(),
[max_idx] __device__(const Eigen::Vector3i& idx) -> bool {
return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 ||
idx[0] >= max_idx || idx[1] >= max_idx ||
idx[2] >= max_idx;
});
free_voxels.resize(thrust::distance(free_voxels.begin(), end1));
thrust::sort(utility::exec_policy(0)->on(0),
free_voxels.begin(), free_voxels.end());
auto end2 = thrust::unique(free_voxels.begin(), free_voxels.end());
free_voxels.resize(thrust::distance(free_voxels.begin(), end2));
}
struct create_occupancy_voxels_functor {
create_occupancy_voxels_functor(const Eigen::Vector3f& origin,
const Eigen::Vector3i& half_resolution,
float voxel_size)
: origin_(origin),
half_resolution_(half_resolution),
voxel_size_(voxel_size){};
const Eigen::Vector3f origin_;
const Eigen::Vector3i half_resolution_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(
const thrust::tuple<Eigen::Vector3f, bool>& x) const {
const Eigen::Vector3f& point = thrust::get<0>(x);
bool hit_flag = thrust::get<1>(x);
Eigen::Vector3f ref_coord = (point - origin_) / voxel_size_;
return (hit_flag)
? Eigen::device_vectorize<float, 3, ::floor>(ref_coord)
.cast<int>() +
half_resolution_
: Eigen::Vector3i(INVALID_VOXEL_INDEX,
INVALID_VOXEL_INDEX,
INVALID_VOXEL_INDEX);
;
}
};
void ComputeOccupiedVoxels(
const utility::device_vector<Eigen::Vector3f>& points,
const utility::device_vector<bool> hit_flags,
float voxel_size,
int resolution,
Eigen::Vector3f& origin,
utility::device_vector<Eigen::Vector3i>& occupied_voxels) {
occupied_voxels.resize(points.size());
size_t max_idx = resolution * resolution * resolution;
Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2);
create_occupancy_voxels_functor func(origin, half_resolution, voxel_size);
thrust::transform(make_tuple_begin(points, hit_flags),
make_tuple_end(points, hit_flags),
occupied_voxels.begin(), func);
auto end1 = thrust::remove_if(
occupied_voxels.begin(), occupied_voxels.end(),
[max_idx] __device__(const Eigen::Vector3i& idx) -> bool {
return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 ||
idx[0] >= max_idx || idx[1] >= max_idx ||
idx[2] >= max_idx;
});
occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end1));
thrust::sort(utility::exec_policy(0)->on(0),
occupied_voxels.begin(), occupied_voxels.end());
auto end2 = thrust::unique(occupied_voxels.begin(), occupied_voxels.end());
occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end2));
}
struct add_occupancy_functor {
add_occupancy_functor(OccupancyVoxel* voxels,
int resolution,
float clamping_thres_min,
float clamping_thres_max,
float prob_miss_log,
float prob_hit_log,
bool occupied)
: voxels_(voxels),
resolution_(resolution),
clamping_thres_min_(clamping_thres_min),
clamping_thres_max_(clamping_thres_max),
prob_miss_log_(prob_miss_log),
prob_hit_log_(prob_hit_log),
occupied_(occupied){};
OccupancyVoxel* voxels_;
const int resolution_;
const float clamping_thres_min_;
const float clamping_thres_max_;
const float prob_miss_log_;
const float prob_hit_log_;
const bool occupied_;
__device__ void operator()(const Eigen::Vector3i& voxel) {
size_t idx = IndexOf(voxel, resolution_);
float p = voxels_[idx].prob_log_;
p = (isnan(p)) ? 0 : p;
p += (occupied_) ? prob_hit_log_ : prob_miss_log_;
voxels_[idx].prob_log_ =
min(max(p, clamping_thres_min_), clamping_thres_max_);
voxels_[idx].grid_index_ = voxel.cast<unsigned short>();
}
};
} // namespace
template class DenseGrid<OccupancyVoxel>;
OccupancyGrid::OccupancyGrid()
: DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid,
0.05,
512,
Eigen::Vector3f::Zero()),
min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)),
max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {}
OccupancyGrid::OccupancyGrid(float voxel_size,
int resolution,
const Eigen::Vector3f& origin)
: DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid,
voxel_size,
resolution,
origin),
min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)),
max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {}
OccupancyGrid::~OccupancyGrid() {}
OccupancyGrid::OccupancyGrid(const OccupancyGrid& other)
: DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, other),
min_bound_(other.min_bound_),
max_bound_(other.max_bound_),
clamping_thres_min_(other.clamping_thres_min_),
clamping_thres_max_(other.clamping_thres_max_),
prob_hit_log_(other.prob_hit_log_),
prob_miss_log_(other.prob_miss_log_),
occ_prob_thres_log_(other.occ_prob_thres_log_),
visualize_free_area_(other.visualize_free_area_) {}
OccupancyGrid& OccupancyGrid::Clear() {
DenseGrid::Clear();
min_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2);
max_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2);
return *this;
}
Eigen::Vector3f OccupancyGrid::GetMinBound() const {
return (min_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2))
.cast<float>() *
voxel_size_ +
origin_;
}
Eigen::Vector3f OccupancyGrid::GetMaxBound() const {
return (max_bound_.cast<int>() -
Eigen::Vector3i::Constant(resolution_ / 2 - 1))
.cast<float>() *
voxel_size_ +
origin_;
}
bool OccupancyGrid::IsOccupied(const Eigen::Vector3f& point) const {
auto idx = GetVoxelIndex(point);
if (idx < 0) return false;
OccupancyVoxel voxel = voxels_[idx];
return !std::isnan(voxel.prob_log_) &&
voxel.prob_log_ > occ_prob_thres_log_;
}
bool OccupancyGrid::IsUnknown(const Eigen::Vector3f& point) const {
auto idx = GetVoxelIndex(point);
if (idx < 0) return true;
OccupancyVoxel voxel = voxels_[idx];
return std::isnan(voxel.prob_log_);
}
thrust::tuple<bool, OccupancyVoxel> OccupancyGrid::GetVoxel(
const Eigen::Vector3f& point) const {
auto idx = GetVoxelIndex(point);
if (idx < 0) return thrust::make_tuple(false, OccupancyVoxel());
OccupancyVoxel voxel = voxels_[idx];
return thrust::make_tuple(!std::isnan(voxel.prob_log_), voxel);
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractBoundVoxels() const {
Eigen::Vector3ui16 diff =
max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones();
auto out = std::make_shared<utility::device_vector<OccupancyVoxel>>();
out->resize(diff[0] * diff[1] * diff[2]);
extract_range_voxels_functor func(diff.cast<int>(), resolution_,
min_bound_.cast<int>());
thrust::copy(thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func)),
thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(out->size()), func)),
out->begin());
return out;
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractKnownVoxels() const {
auto out = ExtractBoundVoxels();
auto remove_fn = [th = occ_prob_thres_log_] __device__(
const thrust::tuple<OccupancyVoxel>& x) {
const OccupancyVoxel& v = thrust::get<0>(x);
return isnan(v.prob_log_);
};
remove_if_vectors(remove_fn, *out);
return out;
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractFreeVoxels() const {
auto out = ExtractBoundVoxels();
auto remove_fn = [th = occ_prob_thres_log_] __device__(
const thrust::tuple<OccupancyVoxel>& x) {
const OccupancyVoxel& v = thrust::get<0>(x);
return isnan(v.prob_log_) || v.prob_log_ > th;
};
remove_if_vectors(remove_fn, *out);
return out;
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractOccupiedVoxels() const {
auto out = ExtractBoundVoxels();
auto remove_fn = [th = occ_prob_thres_log_] __device__(
const thrust::tuple<OccupancyVoxel>& x) {
const OccupancyVoxel& v = thrust::get<0>(x);
return isnan(v.prob_log_) || v.prob_log_ <= th;
};
remove_if_vectors(remove_fn, *out);
return out;
}
OccupancyGrid& OccupancyGrid::Reconstruct(float voxel_size, int resolution) {
DenseGrid::Reconstruct(voxel_size, resolution);
return *this;
}
OccupancyGrid& OccupancyGrid::SetFreeArea(const Eigen::Vector3f& min_bound,
const Eigen::Vector3f& max_bound) {
const Eigen::Vector3i half_res = Eigen::Vector3i::Constant(resolution_ / 2);
Eigen::Vector3i imin_bound = ((min_bound - origin_) / voxel_size_).array().floor().matrix().cast<int>() + half_res;
Eigen::Vector3i imax_bound = ((max_bound - origin_) / voxel_size_).array().floor().matrix().cast<int>() + half_res;
min_bound_ = imin_bound.array().max(Eigen::Array3i(0, 0, 0)).matrix().cast<unsigned short>();
max_bound_ = imax_bound.array().min(Eigen::Array3i(resolution_ - 1, resolution_ - 1, resolution_ - 1)).matrix().cast<unsigned short>();
Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones();
extract_range_voxels_functor func(diff.cast<int>(), resolution_,
min_bound_.cast<int>());
thrust::for_each(thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func)),
thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(diff[0] * diff[1] * diff[2]), func)),
[pml = prob_miss_log_] __device__ (geometry::OccupancyVoxel& v) {
v.prob_log_ = (isnan(v.prob_log_)) ? 0 : v.prob_log_;
v.prob_log_ += pml;
});
return *this;
}
OccupancyGrid& OccupancyGrid::Insert(
const utility::device_vector<Eigen::Vector3f>& points,
const Eigen::Vector3f& viewpoint,
float max_range) {
if (points.empty()) return *this;
utility::device_vector<Eigen::Vector3f> ranged_points(points.size());
utility::device_vector<float> ranged_dists(points.size());
utility::device_vector<bool> hit_flags(points.size());
thrust::transform(
points.begin(), points.end(),
make_tuple_begin(ranged_points, ranged_dists, hit_flags),
[viewpoint, max_range] __device__(const Eigen::Vector3f& pt) {
Eigen::Vector3f pt_vp = pt - viewpoint;
float dist = pt_vp.norm();
bool is_hit = max_range < 0 || dist <= max_range;
return thrust::make_tuple(
(is_hit) ? pt : viewpoint + pt_vp / dist * max_range,
(is_hit) ? dist : max_range, is_hit);
});
float max_dist =
*(thrust::max_element(ranged_dists.begin(), ranged_dists.end()));
int n_div = int(::ceil(max_dist / voxel_size_));
utility::device_vector<Eigen::Vector3i> free_voxels;
utility::device_vector<Eigen::Vector3i> occupied_voxels;
if (n_div > 0) {
utility::device_vector<Eigen::Vector3f> steps(points.size());
thrust::transform(
ranged_points.begin(), ranged_points.end(), steps.begin(),
[viewpoint, n_div] __device__(const Eigen::Vector3f& pt) {
return (pt - viewpoint) / n_div;
});
// compute free voxels
ComputeFreeVoxels(ranged_points, viewpoint, voxel_size_, resolution_,
origin_, steps, n_div + 1, free_voxels);
} else {
thrust::copy(points.begin(), points.end(), ranged_points.begin());
thrust::fill(hit_flags.begin(), hit_flags.end(), true);
}
// compute occupied voxels
ComputeOccupiedVoxels(ranged_points, hit_flags, voxel_size_, resolution_,
origin_, occupied_voxels);
if (n_div > 0) {
utility::device_vector<Eigen::Vector3i> free_voxels_res(
free_voxels.size());
auto end = thrust::set_difference(
free_voxels.begin(), free_voxels.end(), occupied_voxels.begin(),
occupied_voxels.end(), free_voxels_res.begin());
free_voxels_res.resize(thrust::distance(free_voxels_res.begin(), end));
AddVoxels(free_voxels_res, false);
}
AddVoxels(occupied_voxels, true);
return *this;
}
OccupancyGrid& OccupancyGrid::Insert(
const utility::pinned_host_vector<Eigen::Vector3f>& points,
const Eigen::Vector3f& viewpoint,
float max_range) {
utility::device_vector<Eigen::Vector3f> dev_points(points.size());
cudaSafeCall(hipMemcpy(thrust::raw_pointer_cast(dev_points.data()), points.data(),
points.size() * sizeof(Eigen::Vector3f), hipMemcpyHostToDevice));
return Insert(dev_points, viewpoint, max_range);
}
OccupancyGrid& OccupancyGrid::Insert(const geometry::PointCloud& pointcloud,
const Eigen::Vector3f& viewpoint,
float max_range) {
Insert(pointcloud.points_, viewpoint, max_range);
return *this;
}
OccupancyGrid& OccupancyGrid::AddVoxel(const Eigen::Vector3i& voxel,
bool occupied) {
int idx = IndexOf(voxel, resolution_);
size_t max_idx = resolution_ * resolution_ * resolution_;
if (idx < 0 || idx >= max_idx) {
utility::LogError(
"[OccupancyGrid] a provided voxeld is not occupancy grid "
"range.");
return *this;
} else {
OccupancyVoxel org_ov = voxels_[idx];
if (std::isnan(org_ov.prob_log_)) org_ov.prob_log_ = 0.0;
org_ov.prob_log_ += (occupied) ? prob_hit_log_ : prob_miss_log_;
org_ov.prob_log_ =
::min(::max(org_ov.prob_log_, clamping_thres_min_),
clamping_thres_max_);
org_ov.grid_index_ = voxel.cast<unsigned short>();
voxels_[idx] = org_ov;
min_bound_ = min_bound_.array().min(org_ov.grid_index_.array());
max_bound_ = max_bound_.array().max(org_ov.grid_index_.array());
}
return *this;
}
OccupancyGrid& OccupancyGrid::AddVoxels(
const utility::device_vector<Eigen::Vector3i>& voxels, bool occupied) {
if (voxels.empty()) return *this;
Eigen::Vector3i fv = voxels.front();
Eigen::Vector3i bv = voxels.back();
Eigen::Vector3ui16 fvu = fv.cast<unsigned short>();
Eigen::Vector3ui16 bvu = bv.cast<unsigned short>();
min_bound_ = min_bound_.array().min(fvu.array());
min_bound_ = min_bound_.array().min(bvu.array());
max_bound_ = max_bound_.array().max(fvu.array());
max_bound_ = max_bound_.array().max(bvu.array());
add_occupancy_functor func(thrust::raw_pointer_cast(voxels_.data()),
resolution_, clamping_thres_min_,
clamping_thres_max_, prob_miss_log_,
prob_hit_log_, occupied);
thrust::for_each(voxels.begin(), voxels.end(), func);
return *this;
}
} // namespace geometry
} // namespace cupoch | 228acd4df76cef9deed6d3f6157c1b54f3c77324.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/densegrid.inl"
#include "cupoch/geometry/geometry_functor.h"
#include "cupoch/geometry/intersection_test.h"
#include "cupoch/geometry/occupancygrid.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/eigen.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace geometry {
namespace {
__constant__ float voxel_offset[7][3] = {{0, 0, 0}, {1, 0, 0}, {-1, 0, 0},
{0, 1, 0}, {0, -1, 0}, {0, 0, 1},
{0, 0, -1}};
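// the 7 offsets select the voxel containing the sample point plus its 6 face neighbours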
struct extract_range_voxels_functor {
extract_range_voxels_functor(const Eigen::Vector3i& extents,
int resolution,
const Eigen::Vector3i& min_bound)
: extents_(extents),
resolution_(resolution),
min_bound_(min_bound){};
const Eigen::Vector3i extents_;
const int resolution_;
const Eigen::Vector3i min_bound_;
__device__ int operator()(size_t idx) const {
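// unflatten idx over the (x, y, z) extents of the selected range, then shift by min_bound_ to get the index into the full grid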
int x = idx / (extents_[1] * extents_[2]);
int yz = idx % (extents_[1] * extents_[2]);
int y = yz / extents_[2];
int z = yz % extents_[2];
Eigen::Vector3i gidx = min_bound_ + Eigen::Vector3i(x, y, z);
return IndexOf(gidx, resolution_);
}
};
struct compute_intersect_voxel_segment_functor {
compute_intersect_voxel_segment_functor(
const Eigen::Vector3f* points,
const Eigen::Vector3f* steps,
const Eigen::Vector3f& viewpoint,
const Eigen::Vector3i& half_resolution,
float voxel_size,
const Eigen::Vector3f& origin,
int n_div)
: points_(points),
steps_(steps),
viewpoint_(viewpoint),
half_resolution_(half_resolution),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
n_div_(n_div){};
const Eigen::Vector3f* points_;
const Eigen::Vector3f* steps_;
const Eigen::Vector3f viewpoint_;
const Eigen::Vector3i half_resolution_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const int n_div_;
__device__ Eigen::Vector3i operator()(size_t idx) {
int pidx = idx / (n_div_ * 7);
int svidx = idx % (n_div_ * 7);
int sidx = svidx / 7;
int vidx = svidx % 7;
Eigen::Vector3f center = sidx * steps_[pidx] + viewpoint_;
Eigen::Vector3f voxel_idx = Eigen::device_vectorize<float, 3, ::floor>(
(center - origin_) / voxel_size_);
Eigen::Vector3f voxel_center =
voxel_size_ *
(voxel_idx + Eigen::Vector3f(voxel_offset[vidx][0],
voxel_offset[vidx][1],
voxel_offset[vidx][2]));
bool is_intersect = intersection_test::LineSegmentAABB(
viewpoint_, points_[pidx], voxel_center - box_half_size_,
voxel_center + box_half_size_);
return (is_intersect) ? voxel_idx.cast<int>() + half_resolution_
: Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX,
geometry::INVALID_VOXEL_INDEX);
}
};
void ComputeFreeVoxels(const utility::device_vector<Eigen::Vector3f>& points,
const Eigen::Vector3f& viewpoint,
float voxel_size,
int resolution,
Eigen::Vector3f& origin,
const utility::device_vector<Eigen::Vector3f>& steps,
int n_div,
utility::device_vector<Eigen::Vector3i>& free_voxels) {
if (points.empty()) return;
size_t n_points = points.size();
size_t max_idx = resolution * resolution * resolution;
Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2);
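// one candidate voxel per (ray sample, neighbour offset) pair: n_div samples x 7 offsets for each input point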
free_voxels.resize(n_div * n_points * 7);
compute_intersect_voxel_segment_functor func(
thrust::raw_pointer_cast(points.data()),
thrust::raw_pointer_cast(steps.data()), viewpoint, half_resolution,
voxel_size, origin, n_div);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_div * n_points * 7),
free_voxels.begin(), func);
auto end1 = thrust::remove_if(
free_voxels.begin(), free_voxels.end(),
[max_idx] __device__(const Eigen::Vector3i& idx) -> bool {
return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 ||
idx[0] >= max_idx || idx[1] >= max_idx ||
idx[2] >= max_idx;
});
free_voxels.resize(thrust::distance(free_voxels.begin(), end1));
thrust::sort(utility::exec_policy(0)->on(0),
free_voxels.begin(), free_voxels.end());
auto end2 = thrust::unique(free_voxels.begin(), free_voxels.end());
free_voxels.resize(thrust::distance(free_voxels.begin(), end2));
}
struct create_occupancy_voxels_functor {
create_occupancy_voxels_functor(const Eigen::Vector3f& origin,
const Eigen::Vector3i& half_resolution,
float voxel_size)
: origin_(origin),
half_resolution_(half_resolution),
voxel_size_(voxel_size){};
const Eigen::Vector3f origin_;
const Eigen::Vector3i half_resolution_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(
const thrust::tuple<Eigen::Vector3f, bool>& x) const {
const Eigen::Vector3f& point = thrust::get<0>(x);
bool hit_flag = thrust::get<1>(x);
Eigen::Vector3f ref_coord = (point - origin_) / voxel_size_;
return (hit_flag)
? Eigen::device_vectorize<float, 3, ::floor>(ref_coord)
.cast<int>() +
half_resolution_
: Eigen::Vector3i(INVALID_VOXEL_INDEX,
INVALID_VOXEL_INDEX,
INVALID_VOXEL_INDEX);
;
}
};
void ComputeOccupiedVoxels(
const utility::device_vector<Eigen::Vector3f>& points,
const utility::device_vector<bool> hit_flags,
float voxel_size,
int resolution,
Eigen::Vector3f& origin,
utility::device_vector<Eigen::Vector3i>& occupied_voxels) {
occupied_voxels.resize(points.size());
size_t max_idx = resolution * resolution * resolution;
Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2);
create_occupancy_voxels_functor func(origin, half_resolution, voxel_size);
thrust::transform(make_tuple_begin(points, hit_flags),
make_tuple_end(points, hit_flags),
occupied_voxels.begin(), func);
auto end1 = thrust::remove_if(
occupied_voxels.begin(), occupied_voxels.end(),
[max_idx] __device__(const Eigen::Vector3i& idx) -> bool {
return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 ||
idx[0] >= max_idx || idx[1] >= max_idx ||
idx[2] >= max_idx;
});
occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end1));
thrust::sort(utility::exec_policy(0)->on(0),
occupied_voxels.begin(), occupied_voxels.end());
auto end2 = thrust::unique(occupied_voxels.begin(), occupied_voxels.end());
occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end2));
}
struct add_occupancy_functor {
add_occupancy_functor(OccupancyVoxel* voxels,
int resolution,
float clamping_thres_min,
float clamping_thres_max,
float prob_miss_log,
float prob_hit_log,
bool occupied)
: voxels_(voxels),
resolution_(resolution),
clamping_thres_min_(clamping_thres_min),
clamping_thres_max_(clamping_thres_max),
prob_miss_log_(prob_miss_log),
prob_hit_log_(prob_hit_log),
occupied_(occupied){};
OccupancyVoxel* voxels_;
const int resolution_;
const float clamping_thres_min_;
const float clamping_thres_max_;
const float prob_miss_log_;
const float prob_hit_log_;
const bool occupied_;
__device__ void operator()(const Eigen::Vector3i& voxel) {
size_t idx = IndexOf(voxel, resolution_);
float p = voxels_[idx].prob_log_;
p = (isnan(p)) ? 0 : p;
p += (occupied_) ? prob_hit_log_ : prob_miss_log_;
voxels_[idx].prob_log_ =
min(max(p, clamping_thres_min_), clamping_thres_max_);
voxels_[idx].grid_index_ = voxel.cast<unsigned short>();
}
};
} // namespace
template class DenseGrid<OccupancyVoxel>;
OccupancyGrid::OccupancyGrid()
: DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid,
0.05,
512,
Eigen::Vector3f::Zero()),
min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)),
max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {}
OccupancyGrid::OccupancyGrid(float voxel_size,
int resolution,
const Eigen::Vector3f& origin)
: DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid,
voxel_size,
resolution,
origin),
min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)),
max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {}
OccupancyGrid::~OccupancyGrid() {}
OccupancyGrid::OccupancyGrid(const OccupancyGrid& other)
: DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, other),
min_bound_(other.min_bound_),
max_bound_(other.max_bound_),
clamping_thres_min_(other.clamping_thres_min_),
clamping_thres_max_(other.clamping_thres_max_),
prob_hit_log_(other.prob_hit_log_),
prob_miss_log_(other.prob_miss_log_),
occ_prob_thres_log_(other.occ_prob_thres_log_),
visualize_free_area_(other.visualize_free_area_) {}
OccupancyGrid& OccupancyGrid::Clear() {
DenseGrid::Clear();
min_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2);
max_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2);
return *this;
}
Eigen::Vector3f OccupancyGrid::GetMinBound() const {
return (min_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2))
.cast<float>() *
voxel_size_ +
origin_;
}
Eigen::Vector3f OccupancyGrid::GetMaxBound() const {
return (max_bound_.cast<int>() -
Eigen::Vector3i::Constant(resolution_ / 2 - 1))
.cast<float>() *
voxel_size_ +
origin_;
}
bool OccupancyGrid::IsOccupied(const Eigen::Vector3f& point) const {
auto idx = GetVoxelIndex(point);
if (idx < 0) return false;
OccupancyVoxel voxel = voxels_[idx];
return !std::isnan(voxel.prob_log_) &&
voxel.prob_log_ > occ_prob_thres_log_;
}
bool OccupancyGrid::IsUnknown(const Eigen::Vector3f& point) const {
auto idx = GetVoxelIndex(point);
if (idx < 0) return true;
OccupancyVoxel voxel = voxels_[idx];
return std::isnan(voxel.prob_log_);
}
thrust::tuple<bool, OccupancyVoxel> OccupancyGrid::GetVoxel(
const Eigen::Vector3f& point) const {
auto idx = GetVoxelIndex(point);
if (idx < 0) return thrust::make_tuple(false, OccupancyVoxel());
OccupancyVoxel voxel = voxels_[idx];
return thrust::make_tuple(!std::isnan(voxel.prob_log_), voxel);
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractBoundVoxels() const {
Eigen::Vector3ui16 diff =
max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones();
auto out = std::make_shared<utility::device_vector<OccupancyVoxel>>();
out->resize(diff[0] * diff[1] * diff[2]);
extract_range_voxels_functor func(diff.cast<int>(), resolution_,
min_bound_.cast<int>());
thrust::copy(thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func)),
thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(out->size()), func)),
out->begin());
return out;
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractKnownVoxels() const {
auto out = ExtractBoundVoxels();
auto remove_fn = [th = occ_prob_thres_log_] __device__(
const thrust::tuple<OccupancyVoxel>& x) {
const OccupancyVoxel& v = thrust::get<0>(x);
return isnan(v.prob_log_);
};
remove_if_vectors(remove_fn, *out);
return out;
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractFreeVoxels() const {
auto out = ExtractBoundVoxels();
auto remove_fn = [th = occ_prob_thres_log_] __device__(
const thrust::tuple<OccupancyVoxel>& x) {
const OccupancyVoxel& v = thrust::get<0>(x);
return isnan(v.prob_log_) || v.prob_log_ > th;
};
remove_if_vectors(remove_fn, *out);
return out;
}
std::shared_ptr<utility::device_vector<OccupancyVoxel>>
OccupancyGrid::ExtractOccupiedVoxels() const {
auto out = ExtractBoundVoxels();
auto remove_fn = [th = occ_prob_thres_log_] __device__(
const thrust::tuple<OccupancyVoxel>& x) {
const OccupancyVoxel& v = thrust::get<0>(x);
return isnan(v.prob_log_) || v.prob_log_ <= th;
};
remove_if_vectors(remove_fn, *out);
return out;
}
OccupancyGrid& OccupancyGrid::Reconstruct(float voxel_size, int resolution) {
DenseGrid::Reconstruct(voxel_size, resolution);
return *this;
}
OccupancyGrid& OccupancyGrid::SetFreeArea(const Eigen::Vector3f& min_bound,
const Eigen::Vector3f& max_bound) {
const Eigen::Vector3i half_res = Eigen::Vector3i::Constant(resolution_ / 2);
Eigen::Vector3i imin_bound = ((min_bound - origin_) / voxel_size_).array().floor().matrix().cast<int>() + half_res;
Eigen::Vector3i imax_bound = ((max_bound - origin_) / voxel_size_).array().floor().matrix().cast<int>() + half_res;
min_bound_ = imin_bound.array().max(Eigen::Array3i(0, 0, 0)).matrix().cast<unsigned short>();
max_bound_ = imax_bound.array().min(Eigen::Array3i(resolution_ - 1, resolution_ - 1, resolution_ - 1)).matrix().cast<unsigned short>();
Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones();
extract_range_voxels_functor func(diff.cast<int>(), resolution_,
min_bound_.cast<int>());
thrust::for_each(thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func)),
thrust::make_permutation_iterator(voxels_.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(diff[0] * diff[1] * diff[2]), func)),
[pml = prob_miss_log_] __device__ (geometry::OccupancyVoxel& v) {
v.prob_log_ = (isnan(v.prob_log_)) ? 0 : v.prob_log_;
v.prob_log_ += pml;
});
return *this;
}
OccupancyGrid& OccupancyGrid::Insert(
const utility::device_vector<Eigen::Vector3f>& points,
const Eigen::Vector3f& viewpoint,
float max_range) {
if (points.empty()) return *this;
utility::device_vector<Eigen::Vector3f> ranged_points(points.size());
utility::device_vector<float> ranged_dists(points.size());
utility::device_vector<bool> hit_flags(points.size());
thrust::transform(
points.begin(), points.end(),
make_tuple_begin(ranged_points, ranged_dists, hit_flags),
[viewpoint, max_range] __device__(const Eigen::Vector3f& pt) {
Eigen::Vector3f pt_vp = pt - viewpoint;
float dist = pt_vp.norm();
bool is_hit = max_range < 0 || dist <= max_range;
return thrust::make_tuple(
(is_hit) ? pt : viewpoint + pt_vp / dist * max_range,
(is_hit) ? dist : max_range, is_hit);
});
float max_dist =
*(thrust::max_element(ranged_dists.begin(), ranged_dists.end()));
int n_div = int(std::ceil(max_dist / voxel_size_));
utility::device_vector<Eigen::Vector3i> free_voxels;
utility::device_vector<Eigen::Vector3i> occupied_voxels;
if (n_div > 0) {
utility::device_vector<Eigen::Vector3f> steps(points.size());
thrust::transform(
ranged_points.begin(), ranged_points.end(), steps.begin(),
[viewpoint, n_div] __device__(const Eigen::Vector3f& pt) {
return (pt - viewpoint) / n_div;
});
// compute free voxels
ComputeFreeVoxels(ranged_points, viewpoint, voxel_size_, resolution_,
origin_, steps, n_div + 1, free_voxels);
} else {
thrust::copy(points.begin(), points.end(), ranged_points.begin());
thrust::fill(hit_flags.begin(), hit_flags.end(), true);
}
// compute occupied voxels
ComputeOccupiedVoxels(ranged_points, hit_flags, voxel_size_, resolution_,
origin_, occupied_voxels);
if (n_div > 0) {
utility::device_vector<Eigen::Vector3i> free_voxels_res(
free_voxels.size());
auto end = thrust::set_difference(
free_voxels.begin(), free_voxels.end(), occupied_voxels.begin(),
occupied_voxels.end(), free_voxels_res.begin());
free_voxels_res.resize(thrust::distance(free_voxels_res.begin(), end));
AddVoxels(free_voxels_res, false);
}
AddVoxels(occupied_voxels, true);
return *this;
}
OccupancyGrid& OccupancyGrid::Insert(
const utility::pinned_host_vector<Eigen::Vector3f>& points,
const Eigen::Vector3f& viewpoint,
float max_range) {
utility::device_vector<Eigen::Vector3f> dev_points(points.size());
cudaSafeCall(cudaMemcpy(thrust::raw_pointer_cast(dev_points.data()), points.data(),
points.size() * sizeof(Eigen::Vector3f), cudaMemcpyHostToDevice));
return Insert(dev_points, viewpoint, max_range);
}
OccupancyGrid& OccupancyGrid::Insert(const geometry::PointCloud& pointcloud,
const Eigen::Vector3f& viewpoint,
float max_range) {
Insert(pointcloud.points_, viewpoint, max_range);
return *this;
}
OccupancyGrid& OccupancyGrid::AddVoxel(const Eigen::Vector3i& voxel,
bool occupied) {
int idx = IndexOf(voxel, resolution_);
size_t max_idx = resolution_ * resolution_ * resolution_;
if (idx < 0 || idx >= max_idx) {
utility::LogError(
"[OccupancyGrid] a provided voxeld is not occupancy grid "
"range.");
return *this;
} else {
OccupancyVoxel org_ov = voxels_[idx];
if (std::isnan(org_ov.prob_log_)) org_ov.prob_log_ = 0.0;
org_ov.prob_log_ += (occupied) ? prob_hit_log_ : prob_miss_log_;
org_ov.prob_log_ =
std::min(std::max(org_ov.prob_log_, clamping_thres_min_),
clamping_thres_max_);
org_ov.grid_index_ = voxel.cast<unsigned short>();
voxels_[idx] = org_ov;
min_bound_ = min_bound_.array().min(org_ov.grid_index_.array());
max_bound_ = max_bound_.array().max(org_ov.grid_index_.array());
}
return *this;
}
OccupancyGrid& OccupancyGrid::AddVoxels(
const utility::device_vector<Eigen::Vector3i>& voxels, bool occupied) {
if (voxels.empty()) return *this;
Eigen::Vector3i fv = voxels.front();
Eigen::Vector3i bv = voxels.back();
Eigen::Vector3ui16 fvu = fv.cast<unsigned short>();
Eigen::Vector3ui16 bvu = bv.cast<unsigned short>();
min_bound_ = min_bound_.array().min(fvu.array());
min_bound_ = min_bound_.array().min(bvu.array());
max_bound_ = max_bound_.array().max(fvu.array());
max_bound_ = max_bound_.array().max(bvu.array());
add_occupancy_functor func(thrust::raw_pointer_cast(voxels_.data()),
resolution_, clamping_thres_min_,
clamping_thres_max_, prob_miss_log_,
prob_hit_log_, occupied);
thrust::for_each(voxels.begin(), voxels.end(), func);
return *this;
}
} // namespace geometry
} // namespace cupoch |
ebc5addab50a1cf5c43e57f726e4e67999037153.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
const int max_seq_len = 1024;
const int max_batch_tokens = 9216;
/* Convert 2-dim tensor index into vector index */
__forceinline__ __host__ __device__
int flat_2dim(int id1, int id2, int dim2) {
return id1 * dim2 + id2;
}
/* Convert 3-dim tensor index into vector index */
__forceinline__ __host__ __device__
int flat_3dim(int id1, int id2, int id3, int dim2, int dim3) {
return id1 * dim2 * dim3 + id2 * dim3 + id3;
}
template <typename T>
__global__
void concat (const T *__restrict__ inp1,
const T *__restrict__ inp2,
T *output,
int sz0, int sz2, int sz1_1, int sz1_2)
{
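// concatenates inp1 (sz0 x sz1_1 x sz2) and inp2 (sz0 x sz1_2 x sz2) along the middle
// dimension; each thread writes one element of the (sz0 x (sz1_1 + sz1_2) x sz2) output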
int nele = sz0 * sz2 * (sz1_1 + sz1_2);
int idx = flat_2dim(blockIdx.x, threadIdx.x, blockDim.x);
if (idx >= nele) return;
float *dst_ptr = (float *)output + idx;
int idx2 = idx % sz2;
idx = idx / sz2;
int idx1 = idx % (sz1_1 + sz1_2);
int idx0 = idx / (sz1_1 + sz1_2);
float *src_ptr = nullptr;
int sz1 = 0;
if (idx1 < sz1_1) {
sz1 = sz1_1;
src_ptr = (float *)inp1;
} else {
idx1 -= sz1_1;
sz1 = sz1_2;
src_ptr = (float *)inp2;
}
src_ptr += flat_3dim(idx0, idx1, idx2, sz1, sz2);
*dst_ptr = *src_ptr;
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
for (int nhead = 4; nhead <= 16; nhead += 4) { // a multiple of 4
srand(nhead);
int seq_len = rand() % max_seq_len + 1;
while (seq_len <= 1) {
seq_len = rand() % max_seq_len + 1;
}
const int max_batch_size = max_batch_tokens / seq_len;
const int batch_size = rand() % max_batch_size + 1;
const int upbound = 1024 / nhead + 1;
const int hidden_dim = (rand() % upbound + 1) * nhead * 4;
const int head_dim = hidden_dim / nhead;
const int sl1 = rand() % (seq_len - 1) + 1;
const int sl2 = seq_len - sl1;
const int beam_size = rand() % 8 + 1;
printf("num_head = %d\t", nhead);
printf("seq_len = %d\t", seq_len);
printf("batch size = %d\t", batch_size);
printf("hidden dimension = %d\t", hidden_dim);
printf("beam size = %d\n", beam_size);
const size_t inp1_size = batch_size * beam_size * nhead * sl1 * head_dim;
const size_t inp2_size = batch_size * beam_size * nhead * sl2 * head_dim;
const size_t outp_size = batch_size * beam_size * nhead * seq_len * head_dim;
const size_t inp1_size_bytes = inp1_size * sizeof(float);
const size_t inp2_size_bytes = inp2_size * sizeof(float);
const size_t outp_size_bytes = outp_size * sizeof(float);
float *inp1 = (float*) malloc (inp1_size_bytes);
float *inp2 = (float*) malloc (inp2_size_bytes);
float *outp = (float*) malloc (outp_size_bytes);
for (size_t i = 0; i < inp1_size; i++) {
inp1[i] = -1.f;
}
for (size_t i = 0; i < inp2_size; i++) {
inp2[i] = 1.f;
}
float *d_inp1, *d_inp2, *d_outp;
hipMalloc ((void**)&d_inp1, inp1_size_bytes);
hipMalloc ((void**)&d_inp2, inp2_size_bytes);
hipMalloc ((void**)&d_outp, outp_size_bytes);
hipMemcpy (d_inp1, inp1, inp1_size_bytes, hipMemcpyHostToDevice);
hipMemcpy (d_inp2, inp2, inp2_size_bytes, hipMemcpyHostToDevice);
const size_t n = batch_size * beam_size * nhead * head_dim * (sl1 + sl2);
const size_t nblock = (n + 255) / 256;
// warmup
hipLaunchKernelGGL(( concat) , dim3(nblock), dim3(256), 0, 0,
d_inp1, d_inp2, d_outp, batch_size * beam_size * nhead, head_dim, sl1, sl2);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( concat) , dim3(nblock), dim3(256), 0, 0,
d_inp1, d_inp2, d_outp, batch_size * beam_size * nhead, head_dim, sl1, sl2);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / repeat);
hipMemcpy (outp, d_outp, outp_size_bytes, hipMemcpyDeviceToHost);
double checksum = 0;
for (size_t i = 0; i < outp_size; i++) {
checksum += outp[i];
}
printf("Checksum = %lf\n\n", checksum);
hipFree(d_inp1);
hipFree(d_inp2);
hipFree(d_outp);
free(inp1);
free(inp2);
free(outp);
}
}
| ebc5addab50a1cf5c43e57f726e4e67999037153.cu | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
const int max_seq_len = 1024;
const int max_batch_tokens = 9216;
/* Convert 2-dim tensor index into vector index */
__forceinline__ __host__ __device__
int flat_2dim(int id1, int id2, int dim2) {
return id1 * dim2 + id2;
}
/* Convert 3-dim tensor index into vector index */
__forceinline__ __host__ __device__
int flat_3dim(int id1, int id2, int id3, int dim2, int dim3) {
return id1 * dim2 * dim3 + id2 * dim3 + id3;
}
template <typename T>
__global__
void concat (const T *__restrict__ inp1,
const T *__restrict__ inp2,
T *output,
int sz0, int sz2, int sz1_1, int sz1_2)
{
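// concatenates inp1 (sz0 x sz1_1 x sz2) and inp2 (sz0 x sz1_2 x sz2) along the middle
// dimension; each thread writes one element of the (sz0 x (sz1_1 + sz1_2) x sz2) output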
int nele = sz0 * sz2 * (sz1_1 + sz1_2);
int idx = flat_2dim(blockIdx.x, threadIdx.x, blockDim.x);
if (idx >= nele) return;
float *dst_ptr = (float *)output + idx;
int idx2 = idx % sz2;
idx = idx / sz2;
int idx1 = idx % (sz1_1 + sz1_2);
int idx0 = idx / (sz1_1 + sz1_2);
float *src_ptr = nullptr;
int sz1 = 0;
if (idx1 < sz1_1) {
sz1 = sz1_1;
src_ptr = (float *)inp1;
} else {
idx1 -= sz1_1;
sz1 = sz1_2;
src_ptr = (float *)inp2;
}
src_ptr += flat_3dim(idx0, idx1, idx2, sz1, sz2);
*dst_ptr = *src_ptr;
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
for (int nhead = 4; nhead <= 16; nhead += 4) { // a multiple of 4
srand(nhead);
int seq_len = rand() % max_seq_len + 1;
while (seq_len <= 1) {
seq_len = rand() % max_seq_len + 1;
}
const int max_batch_size = max_batch_tokens / seq_len;
const int batch_size = rand() % max_batch_size + 1;
const int upbound = 1024 / nhead + 1;
const int hidden_dim = (rand() % upbound + 1) * nhead * 4;
const int head_dim = hidden_dim / nhead;
const int sl1 = rand() % (seq_len - 1) + 1;
const int sl2 = seq_len - sl1;
const int beam_size = rand() % 8 + 1;
printf("num_head = %d\t", nhead);
printf("seq_len = %d\t", seq_len);
printf("batch size = %d\t", batch_size);
printf("hidden dimension = %d\t", hidden_dim);
printf("beam size = %d\n", beam_size);
const size_t inp1_size = batch_size * beam_size * nhead * sl1 * head_dim;
const size_t inp2_size = batch_size * beam_size * nhead * sl2 * head_dim;
const size_t outp_size = batch_size * beam_size * nhead * seq_len * head_dim;
const size_t inp1_size_bytes = inp1_size * sizeof(float);
const size_t inp2_size_bytes = inp2_size * sizeof(float);
const size_t outp_size_bytes = outp_size * sizeof(float);
float *inp1 = (float*) malloc (inp1_size_bytes);
float *inp2 = (float*) malloc (inp2_size_bytes);
float *outp = (float*) malloc (outp_size_bytes);
for (size_t i = 0; i < inp1_size; i++) {
inp1[i] = -1.f;
}
for (size_t i = 0; i < inp2_size; i++) {
inp2[i] = 1.f;
}
float *d_inp1, *d_inp2, *d_outp;
cudaMalloc ((void**)&d_inp1, inp1_size_bytes);
cudaMalloc ((void**)&d_inp2, inp2_size_bytes);
cudaMalloc ((void**)&d_outp, outp_size_bytes);
cudaMemcpy (d_inp1, inp1, inp1_size_bytes, cudaMemcpyHostToDevice);
cudaMemcpy (d_inp2, inp2, inp2_size_bytes, cudaMemcpyHostToDevice);
const size_t n = batch_size * beam_size * nhead * head_dim * (sl1 + sl2);
const size_t nblock = (n + 255) / 256;
// warmup
concat <<<nblock, 256>>>(
d_inp1, d_inp2, d_outp, batch_size * beam_size * nhead, head_dim, sl1, sl2);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
concat <<<nblock, 256>>>(
d_inp1, d_inp2, d_outp, batch_size * beam_size * nhead, head_dim, sl1, sl2);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / repeat);
cudaMemcpy (outp, d_outp, outp_size_bytes, cudaMemcpyDeviceToHost);
double checksum = 0;
for (size_t i = 0; i < outp_size; i++) {
checksum += outp[i];
}
printf("Checksum = %lf\n\n", checksum);
cudaFree(d_inp1);
cudaFree(d_inp2);
cudaFree(d_outp);
free(inp1);
free(inp2);
free(outp);
}
}
|
399893e65aab5803d2c1c89098c8906ccb2100d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PrefixSum.cuh"
/**
* @brief Up-Sweep
*/
__device__ void up_sweep_2048(
uint* data_block
) {
uint starting_elem = 1;
for (uint i=2; i<=2048; i<<=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
const uint element = starting_elem + (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
data_block[element] += data_block[element - (i>>1)];
}
}
starting_elem += i;
__syncthreads();
}
}
/**
* @brief Down-sweep
*/
__device__ void down_sweep_2048(
uint* data_block
) {
for (uint i=2048; i>=2; i>>=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
const auto element = 2047 - (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
const auto other_element = element - (i>>1);
const auto value = data_block[other_element];
data_block[other_element] = data_block[element];
data_block[element] += value;
}
}
__syncthreads();
}
}
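/*
  Worked Blelloch-scan trace (illustrative, on 8 elements instead of 2048):
    input:            [3, 1, 7, 0, 4, 1, 6, 3]
    after up-sweep:   [3, 4, 7, 11, 4, 5, 6, 25]    (last entry = total sum)
    zero last entry:  [3, 4, 7, 11, 4, 5, 6, 0]
    after down-sweep: [0, 3, 4, 11, 11, 15, 16, 22] (exclusive prefix sum)
  The implementation below applies this same pattern to 2048-element blocks,
  carrying the running total across blocks in prev_last_elem.
*/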
__device__ void prefix_sum_single_block_implementation(
uint* dev_total_sum,
uint* dev_array,
const uint array_size
) {
// Prefix sum of elements in dev_array
// Using Blelloch scan https://www.youtube.com/watch?v=mmYv3Haj6uc
__shared__ uint data_block [2048];
// Let's do it in blocks of 2048 (2^11)
unsigned prev_last_elem = 0;
for (uint block=0; block<(array_size>>11); ++block) {
const uint first_elem = block << 11;
// Load elements into shared memory, add prev_last_elem
data_block[2*threadIdx.x] = dev_array[first_elem + 2*threadIdx.x];
data_block[2*threadIdx.x + 1] = dev_array[first_elem + 2*threadIdx.x + 1];
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
const uint new_last_elem = data_block[2047];
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back elements
dev_array[first_elem + 2*threadIdx.x] = data_block[2*threadIdx.x] + prev_last_elem;
dev_array[first_elem + 2*threadIdx.x + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
prev_last_elem += new_last_elem;
__syncthreads();
}
  // The last iteration is special because it may contain
  // fewer than 2048 elements
const auto elements_remaining = array_size & 0x7FF; // % 2048
if (elements_remaining > 0) {
const auto first_elem = array_size - elements_remaining;
// Initialize all elements to zero
data_block[2*threadIdx.x] = 0;
data_block[2*threadIdx.x + 1] = 0;
// Load elements
const auto elem_index = first_elem + 2 * threadIdx.x;
if (elem_index < array_size) {
data_block[2*threadIdx.x] = dev_array[elem_index];
}
if ((elem_index+1) < array_size) {
data_block[2*threadIdx.x + 1] = dev_array[elem_index + 1];
}
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
// Store sum of all elements
if (threadIdx.x==0) {
dev_total_sum[0] = prev_last_elem + data_block[2047];
}
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back elements
if (elem_index < array_size) {
dev_array[elem_index] = data_block[2*threadIdx.x] + prev_last_elem;
}
if ((elem_index+1) < array_size) {
dev_array[elem_index + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
}
} else {
    // Special case where the number of elements is an exact multiple of 2048
if (threadIdx.x==0) {
dev_total_sum[0] = prev_last_elem;
}
}
}
__global__ void prefix_sum_single_block(
uint* dev_total_sum,
uint* dev_array,
const uint array_size
) {
prefix_sum_single_block_implementation(dev_total_sum,
dev_array, array_size);
}
__global__ void copy_and_prefix_sum_single_block(
uint* dev_total_sum,
uint* dev_input_array,
uint* dev_output_array,
const uint array_size
) {
// Copy the input array into the output array
for (uint i=0; i<(array_size + blockDim.x - 1) / blockDim.x; ++i) {
const auto element = i*blockDim.x + threadIdx.x;
if (element < array_size) {
dev_output_array[element] = dev_input_array[element];
}
}
__syncthreads();
// Perform prefix_sum over output array
prefix_sum_single_block_implementation(dev_total_sum,
dev_output_array, array_size);
}
/**
 * @brief Copies Velo track hit numbers into a consecutive container
*/
__global__ void copy_velo_track_hit_number(
const Velo::TrackHits* dev_tracks,
int* dev_atomics_storage,
uint* dev_velo_track_hit_number
) {
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
const Velo::TrackHits* event_tracks = dev_tracks + event_number * VeloTracking::max_tracks;
const int accumulated_tracks = dev_atomics_storage[number_of_events + event_number];
const int number_of_tracks = dev_atomics_storage[event_number];
// Pointer to velo_track_hit_number of current event
uint* velo_track_hit_number = dev_velo_track_hit_number + accumulated_tracks;
for (int i=0; i<(number_of_tracks + blockDim.x - 1) / blockDim.x; ++i) {
const auto element = i*blockDim.x + threadIdx.x;
if (element < number_of_tracks) {
velo_track_hit_number[element] = event_tracks[element].hitsNum;
}
}
}
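/*
  Layout of dev_atomics_storage as read above (inferred from the indexing in
  this kernel, not from external documentation):
    [0, number_of_events)                   per-event track counts
    [number_of_events, 2*number_of_events)  accumulated (prefix-summed) track offsets
  so dev_velo_track_hit_number + accumulated_tracks is the first slot
  reserved for the current event.
*/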
| 399893e65aab5803d2c1c89098c8906ccb2100d0.cu | #include "PrefixSum.cuh"
/**
* @brief Up-Sweep
*/
__device__ void up_sweep_2048(
uint* data_block
) {
uint starting_elem = 1;
for (uint i=2; i<=2048; i<<=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
const uint element = starting_elem + (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
data_block[element] += data_block[element - (i>>1)];
}
}
starting_elem += i;
__syncthreads();
}
}
/**
* @brief Down-sweep
*/
__device__ void down_sweep_2048(
uint* data_block
) {
for (uint i=2048; i>=2; i>>=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
const auto element = 2047 - (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
const auto other_element = element - (i>>1);
const auto value = data_block[other_element];
data_block[other_element] = data_block[element];
data_block[element] += value;
}
}
__syncthreads();
}
}
__device__ void prefix_sum_single_block_implementation(
uint* dev_total_sum,
uint* dev_array,
const uint array_size
) {
// Prefix sum of elements in dev_array
// Using Blelloch scan https://www.youtube.com/watch?v=mmYv3Haj6uc
__shared__ uint data_block [2048];
// Let's do it in blocks of 2048 (2^11)
unsigned prev_last_elem = 0;
for (uint block=0; block<(array_size>>11); ++block) {
const uint first_elem = block << 11;
// Load elements into shared memory, add prev_last_elem
data_block[2*threadIdx.x] = dev_array[first_elem + 2*threadIdx.x];
data_block[2*threadIdx.x + 1] = dev_array[first_elem + 2*threadIdx.x + 1];
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
const uint new_last_elem = data_block[2047];
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back elements
dev_array[first_elem + 2*threadIdx.x] = data_block[2*threadIdx.x] + prev_last_elem;
dev_array[first_elem + 2*threadIdx.x + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
prev_last_elem += new_last_elem;
__syncthreads();
}
  // The last iteration is special because it may contain
  // fewer than 2048 elements
const auto elements_remaining = array_size & 0x7FF; // % 2048
if (elements_remaining > 0) {
const auto first_elem = array_size - elements_remaining;
// Initialize all elements to zero
data_block[2*threadIdx.x] = 0;
data_block[2*threadIdx.x + 1] = 0;
// Load elements
const auto elem_index = first_elem + 2 * threadIdx.x;
if (elem_index < array_size) {
data_block[2*threadIdx.x] = dev_array[elem_index];
}
if ((elem_index+1) < array_size) {
data_block[2*threadIdx.x + 1] = dev_array[elem_index + 1];
}
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
// Store sum of all elements
if (threadIdx.x==0) {
dev_total_sum[0] = prev_last_elem + data_block[2047];
}
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back elements
if (elem_index < array_size) {
dev_array[elem_index] = data_block[2*threadIdx.x] + prev_last_elem;
}
if ((elem_index+1) < array_size) {
dev_array[elem_index + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
}
} else {
    // Special case where the number of elements is an exact multiple of 2048
if (threadIdx.x==0) {
dev_total_sum[0] = prev_last_elem;
}
}
}
__global__ void prefix_sum_single_block(
uint* dev_total_sum,
uint* dev_array,
const uint array_size
) {
prefix_sum_single_block_implementation(dev_total_sum,
dev_array, array_size);
}
__global__ void copy_and_prefix_sum_single_block(
uint* dev_total_sum,
uint* dev_input_array,
uint* dev_output_array,
const uint array_size
) {
// Copy the input array into the output array
for (uint i=0; i<(array_size + blockDim.x - 1) / blockDim.x; ++i) {
const auto element = i*blockDim.x + threadIdx.x;
if (element < array_size) {
dev_output_array[element] = dev_input_array[element];
}
}
__syncthreads();
// Perform prefix_sum over output array
prefix_sum_single_block_implementation(dev_total_sum,
dev_output_array, array_size);
}
/**
 * @brief Copies Velo track hit numbers into a consecutive container
*/
__global__ void copy_velo_track_hit_number(
const Velo::TrackHits* dev_tracks,
int* dev_atomics_storage,
uint* dev_velo_track_hit_number
) {
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
const Velo::TrackHits* event_tracks = dev_tracks + event_number * VeloTracking::max_tracks;
const int accumulated_tracks = dev_atomics_storage[number_of_events + event_number];
const int number_of_tracks = dev_atomics_storage[event_number];
// Pointer to velo_track_hit_number of current event
uint* velo_track_hit_number = dev_velo_track_hit_number + accumulated_tracks;
for (int i=0; i<(number_of_tracks + blockDim.x - 1) / blockDim.x; ++i) {
const auto element = i*blockDim.x + threadIdx.x;
if (element < number_of_tracks) {
velo_track_hit_number[element] = event_tracks[element].hitsNum;
}
}
}
|
1afc8cbda9d38deb8fe23fc4542ffc7824e71eb5.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * Discrete Sine Transform, column-wise (DST type I)
 * DST_I_Column
 * This CUDA code can work with any type of input mxArray, either a gpuArray
 * or a standard MATLAB CPU array {prhs[0] := mxGPUArray or CPU Array}, and
 * returns a gpuArray output, B=DST_I_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_I_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
template <unsigned int TILE_DIM > __global__ void DSTI_Column_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = cos((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrt(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrt(2.0 / numARows)
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = sin(((((threadIdx.x + k*TILE_DIM)+1)*PI_d*(Row+1)) / (numARows + 1)))*sqrt(2.0 / (numARows+1)); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
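/*
  Reading aid (derived from the index expressions in the kernel above): the
  kernel computes C = S * A column-wise, where S is the N x N DST-I basis
  built on the fly with N = numARows,
    S(r, k) = sqrt(2 / (N + 1)) * sin(pi * (r + 1) * (k + 1) / (N + 1)),
  so C(r, c) = sum_k S(r, k) * A(k, c).
*/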
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTColumnOne(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
hipError_t error;
int devID = 0;
// get number of SMs on this GPU
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
//hostComputedC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| 1afc8cbda9d38deb8fe23fc4542ffc7824e71eb5.cu | /*
 * Discrete Sine Transform, column-wise (DST type I)
 * DST_I_Column
 * This CUDA code can work with any type of input mxArray, either a gpuArray
 * or a standard MATLAB CPU array {prhs[0] := mxGPUArray or CPU Array}, and
 * returns a gpuArray output, B=DST_I_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_I_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
template <unsigned int TILE_DIM > __global__ void DSTI_Column_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = cos((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrt(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrt(2.0 / numARows)
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = sin(((((threadIdx.x + k*TILE_DIM)+1)*PI_d*(Row+1)) / (numARows + 1)))*sqrt(2.0 / (numARows+1)); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTColumnOne(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
//hostComputedC = (double *)malloc(sizeof(double)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
b3f29382b0f583fd64580bb1a25bfe63a007a6bb.hip | // !!! This is a file automatically generated by hipify!!!
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
((Vec_CUDA*)v->spptr)->GPUarray_allocated = NULL;
}
err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
    ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
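/*
  Reading aid for the reductions above: with x_p the local part owned by
  rank p,
    NORM_2:        ||x||_2   = sqrt( sum_p ||x_p||_2^2 )
    NORM_1:        ||x||_1   = sum_p ||x_p||_1
    NORM_INFINITY: ||x||_inf = max_p ||x_p||_inf
  which is exactly what the MPIU_SUM / MPIU_MAX allreduces implement.
*/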
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPI_Private(*v,PETSC_FALSE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRCUDA(ierr);
vv->valid_GPU_array = PETSC_OFFLOAD_GPU;
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective on MPI_Comm
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
Concepts: vectors^creating with array
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
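/*
  Minimal usage sketch (not part of the original source; the local length n
  and the device allocation are assumed for illustration):

    PetscScalar *d_array;
    hipMalloc((void**)&d_array, n*sizeof(PetscScalar));   // caller owns this buffer
    Vec v;
    ierr = VecCreateMPICUDAWithArray(PETSC_COMM_WORLD, 1, n, PETSC_DECIDE,
                                     d_array, &v);CHKERRQ(ierr);
    ... use v ...
    ierr = VecDestroy(&v);CHKERRQ(ierr);
    hipFree(d_array);   // PETSc never frees the user-provided array
*/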
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
hipError_t err;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
vv->ops->dotnorm2 = VecDotNorm2_MPICUDA;
vv->ops->waxpy = VecWAXPY_SeqCUDA;
vv->ops->duplicate = VecDuplicate_MPICUDA;
vv->ops->dot = VecDot_MPICUDA;
vv->ops->mdot = VecMDot_MPICUDA;
vv->ops->tdot = VecTDot_MPICUDA;
vv->ops->norm = VecNorm_MPICUDA;
vv->ops->scale = VecScale_SeqCUDA;
vv->ops->copy = VecCopy_SeqCUDA;
vv->ops->set = VecSet_SeqCUDA;
vv->ops->swap = VecSwap_SeqCUDA;
vv->ops->axpy = VecAXPY_SeqCUDA;
vv->ops->axpby = VecAXPBY_SeqCUDA;
vv->ops->maxpy = VecMAXPY_SeqCUDA;
vv->ops->aypx = VecAYPX_SeqCUDA;
vv->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
vv->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
vv->ops->setrandom = VecSetRandom_SeqCUDA;
vv->ops->placearray = VecPlaceArray_SeqCUDA;
vv->ops->replacearray = VecReplaceArray_SeqCUDA;
vv->ops->resetarray = VecResetArray_SeqCUDA;
vv->ops->dot_local = VecDot_SeqCUDA;
vv->ops->tdot_local = VecTDot_SeqCUDA;
vv->ops->norm_local = VecNorm_SeqCUDA;
vv->ops->mdot_local = VecMDot_SeqCUDA;
vv->ops->destroy = VecDestroy_MPICUDA;
vv->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
vv->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
vv->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
vv->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
vv->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (array) {
if (!vv->spptr) {
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
err = hipStreamCreate(&veccuda->stream);CHKERRCUDA(err);
veccuda->GPUarray_allocated = 0;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
vv->valid_GPU_array = PETSC_OFFLOAD_UNALLOCATED;
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
}
PetscFunctionReturn(0);
}
| b3f29382b0f583fd64580bb1a25bfe63a007a6bb.cu |
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
((Vec_CUDA*)v->spptr)->GPUarray_allocated = NULL;
}
err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
    ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPI_Private(*v,PETSC_FALSE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRCUDA(ierr);
vv->valid_GPU_array = PETSC_OFFLOAD_GPU;
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective on MPI_Comm
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
Concepts: vectors^creating with array
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
cudaError_t err;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
vv->ops->dotnorm2 = VecDotNorm2_MPICUDA;
vv->ops->waxpy = VecWAXPY_SeqCUDA;
vv->ops->duplicate = VecDuplicate_MPICUDA;
vv->ops->dot = VecDot_MPICUDA;
vv->ops->mdot = VecMDot_MPICUDA;
vv->ops->tdot = VecTDot_MPICUDA;
vv->ops->norm = VecNorm_MPICUDA;
vv->ops->scale = VecScale_SeqCUDA;
vv->ops->copy = VecCopy_SeqCUDA;
vv->ops->set = VecSet_SeqCUDA;
vv->ops->swap = VecSwap_SeqCUDA;
vv->ops->axpy = VecAXPY_SeqCUDA;
vv->ops->axpby = VecAXPBY_SeqCUDA;
vv->ops->maxpy = VecMAXPY_SeqCUDA;
vv->ops->aypx = VecAYPX_SeqCUDA;
vv->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
vv->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
vv->ops->setrandom = VecSetRandom_SeqCUDA;
vv->ops->placearray = VecPlaceArray_SeqCUDA;
vv->ops->replacearray = VecReplaceArray_SeqCUDA;
vv->ops->resetarray = VecResetArray_SeqCUDA;
vv->ops->dot_local = VecDot_SeqCUDA;
vv->ops->tdot_local = VecTDot_SeqCUDA;
vv->ops->norm_local = VecNorm_SeqCUDA;
vv->ops->mdot_local = VecMDot_SeqCUDA;
vv->ops->destroy = VecDestroy_MPICUDA;
vv->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
vv->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
vv->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
vv->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
vv->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (array) {
if (!vv->spptr) {
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
err = cudaStreamCreate(&veccuda->stream);CHKERRCUDA(err);
veccuda->GPUarray_allocated = 0;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
vv->valid_GPU_array = PETSC_OFFLOAD_UNALLOCATED;
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
}
PetscFunctionReturn(0);
}
|
3a01daf0544d481768109b534d43380fc7edba5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <comm_quda.h>
#include <unitarization_links.h>
#include <pgauge_monte.h>
#include <random_quda.h>
#include <cub_helper.cuh>
#include <index_helper.cuh>
#ifndef PI
#define PI 3.1415926535897932384626433832795 // pi
#endif
#ifndef PII
#define PII 6.2831853071795864769252867665590 // 2 * pi
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
template <typename Gauge>
struct InitGaugeColdArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Gauge dataOr;
InitGaugeColdArg(const Gauge &dataOr, const cudaGaugeField &data)
: dataOr(dataOr) {
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
}
};
template<typename Float, typename Gauge, int NCOLORS>
__global__ void compute_InitGauge_ColdStart(InitGaugeColdArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= arg.threads ) return;
int parity = 0;
if ( idx >= arg.threads / 2 ) {
parity = 1;
idx -= arg.threads / 2;
}
Matrix<complex<Float>,NCOLORS> U;
setIdentity(&U);
for ( int d = 0; d < 4; d++ )
arg.dataOr.save((Float*)(U.data),idx, d, parity);
}
template<typename Float, typename Gauge, int NCOLORS>
class InitGaugeCold : Tunable {
InitGaugeColdArg<Gauge> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
InitGaugeCold(InitGaugeColdArg<Gauge> &arg)
: arg(arg) {
}
~InitGaugeCold () {
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
compute_InitGauge_ColdStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg);
//hipDeviceSynchronize();
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
long long flops() const {
return 0;
} // Only correct if there is no link reconstruction, no cub reduction accounted also
long long bytes() const {
return 0;
} //no accounting the reduction!!!!
};
template<typename Float, int NCOLORS, typename Gauge>
void InitGaugeField( Gauge dataOr, cudaGaugeField& data) {
InitGaugeColdArg<Gauge> initarg(dataOr, data);
InitGaugeCold<Float, Gauge, NCOLORS> init(initarg);
init.apply(0);
checkCudaError();
}
template<typename Float>
void InitGaugeField( cudaGaugeField& data) {
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
  /** @brief Perform a cold start of the gauge field: every link is set to the identity SU(3) matrix; in the multi-GPU case the ghost links are filled as well, so no data exchange is needed
*
* @param[in,out] data Gauge field
*/
void InitGaugeField( cudaGaugeField& data) {
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
InitGaugeField<float> (data);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
InitGaugeField<double>(data);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
}
template <typename Gauge>
struct InitGaugeHotArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
RNG rngstate;
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
InitGaugeHotArg(const Gauge &dataOr, const cudaGaugeField &data, RNG &rngstate)
: dataOr(dataOr), rngstate(rngstate) {
#ifdef MULTI_GPU
for ( int dir = 0; dir < 4; ++dir ) {
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir] * 2;
}
#else
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
#endif
//the optimal number of RNG states in rngstate array must be equal to half the lattice volume
//this number is the same used in heatbath...
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
template <typename Float>
__host__ __device__ static inline void reunit_link( Matrix<complex<Float>,3> &U ){
complex<Float> t2((Float)0.0, (Float)0.0);
Float t1 = 0.0;
//first normalize first row
//sum of squares of row
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += norm(U(0,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(0,c) *= t1;
//6
#pragma unroll
for ( int c = 0; c < 3; c++ ) t2 += conj(U(0,c)) * U(1,c);
//24
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c);
//24
//normalize second row
//sum of squares of row
t1 = 0.0;
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += norm(U(1,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1, c) *= t1;
//6
    //Reconstruct the last row as the conjugate cross product of the first two
U(2,0) = conj(U(0,1) * U(1,2) - U(0,2) * U(1,1));
U(2,1) = conj(U(0,2) * U(1,0) - U(0,0) * U(1,2));
U(2,2) = conj(U(0,0) * U(1,1) - U(0,1) * U(1,0));
//42
//T=130
}
/**
@brief Generate the four random real elements of the SU(2) matrix
@param localstate CURAND rng state
@return four real numbers of the SU(2) matrix
*/
template <class T>
__device__ static inline Matrix<T,2> randomSU2(cuRNGState& localState){
Matrix<T,2> a;
T aabs, ctheta, stheta, phi;
a(0,0) = Random<T>(localState, (T)-1.0, (T)1.0);
aabs = sqrt( 1.0 - a(0,0) * a(0,0));
ctheta = Random<T>(localState, (T)-1.0, (T)1.0);
phi = PII * Random<T>(localState);
stheta = ( hiprand(&localState) & 1 ? 1 : -1 ) * sqrt( (T)1.0 - ctheta * ctheta );
a(0,1) = aabs * stheta * cos( phi );
a(1,0) = aabs * stheta * sin( phi );
a(1,1) = aabs * ctheta;
return a;
}
/**
@brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link
@param u SU(2) matrix represented by four real numbers
@param link SU(Nc) matrix
@param id indices
*/
template <class T, int NCOLORS>
__host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<complex<T>,NCOLORS> &link, int2 id ){
for ( int j = 0; j < NCOLORS; j++ ) {
complex<T> tmp = complex<T>( u(0,0), u(1,1) ) * link(id.x, j) + complex<T>( u(1,0), u(0,1) ) * link(id.y, j);
link(id.y, j) = complex<T>(-u(1,0), u(0,1) ) * link(id.x, j) + complex<T>( u(0,0),-u(1,1) ) * link(id.y, j);
link(id.x, j) = tmp;
}
}
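  /*
    Note on the SU(2) representation used above (derived from randomSU2 and
    mul_block_sun, stated here as a reading aid): the four reals
    (a00, a01, a10, a11) satisfy a00^2 + a01^2 + a10^2 + a11^2 = 1, and the
    2x2 block applied to rows (id.x, id.y) is
      [  alpha        beta        ]    with alpha = a00 + i*a11,
      [ -conj(beta)   conj(alpha) ]         beta  = a10 + i*a01,
    whose determinant |alpha|^2 + |beta|^2 = 1, i.e. it lies in SU(2).
  */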
/**
@brief Calculate the SU(2) index block in the SU(Nc) matrix
       @param block block number for which to compute the indices; the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2.
       @return the two indices as an int2, accessed via .x and .y.
*/
template<int NCOLORS>
__host__ __device__ static inline int2 IndexBlock(int block){
int2 id;
int i1;
int found = 0;
int del_i = 0;
int index = -1;
while ( del_i < (NCOLORS - 1) && found == 0 ) {
del_i++;
for ( i1 = 0; i1 < (NCOLORS - del_i); i1++ ) {
index++;
if ( index == block ) {
found = 1;
break;
}
}
}
id.y = i1 + del_i;
id.x = i1;
return id;
}
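  /*
    Example, obtained by unrolling the loop above for NCOLORS = 3:
      block 0 -> (id.x, id.y) = (0, 1)
      block 1 -> (id.x, id.y) = (1, 2)
      block 2 -> (id.x, id.y) = (0, 2)
    i.e. the NCOLORS*(NCOLORS-1)/2 = 3 off-diagonal SU(2) subgroups of SU(3).
  */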
/**
@brief Generate a SU(Nc) random matrix
@param localstate CURAND rng state
@return SU(Nc) matrix
*/
template <class Float, int NCOLORS>
__device__ inline Matrix<complex<Float>,NCOLORS> randomize( cuRNGState& localState ){
Matrix<complex<Float>,NCOLORS> U;
for ( int i = 0; i < NCOLORS; i++ )
for ( int j = 0; j < NCOLORS; j++ )
U(i,j) = complex<Float>( (Float)(Random<Float>(localState) - 0.5), (Float)(Random<Float>(localState) - 0.5) );
reunit_link<Float>(U);
return U;
/*setIdentity(&U);
for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
Matrix<Float,2> rr = randomSU2<Float>(localState);
int2 id = IndexBlock<NCOLORS>( block );
mul_block_sun<Float, NCOLORS>(rr, U, id);
//U = block_su2_to_su3<Float>( U, a00, a01, a10, a11, block );
}
return U;*/
}
template<typename Float, typename Gauge, int NCOLORS>
__global__ void compute_InitGauge_HotStart(InitGaugeHotArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= arg.threads ) return;
#ifdef MULTI_GPU
int X[4], x[4];
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
for ( int dr = 0; dr < 4; ++dr ) X[dr] += 2 * arg.border[dr];
int id = idx;
cuRNGState localState = arg.rngstate.State()[ id ];
#else
cuRNGState localState = arg.rngstate.State()[ idx ];
#endif
for ( int parity = 0; parity < 2; parity++ ) {
#ifdef MULTI_GPU
getCoords(x, id, arg.X, parity);
for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr];
idx = linkIndex(x,X);
#endif
for ( int d = 0; d < 4; d++ ) {
Matrix<complex<Float>,NCOLORS> U;
U = randomize<Float, NCOLORS>(localState);
arg.dataOr.save((Float*)(U.data),idx, d, parity);
}
}
#ifdef MULTI_GPU
arg.rngstate.State()[ id ] = localState;
#else
arg.rngstate.State()[ idx ] = localState;
#endif
}
template<typename Float, typename Gauge, int NCOLORS>
class InitGaugeHot : Tunable {
InitGaugeHotArg<Gauge> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
InitGaugeHot(InitGaugeHotArg<Gauge> &arg)
: arg(arg) {
}
~InitGaugeHot () {
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
compute_InitGauge_HotStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg);
//hipDeviceSynchronize();
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lud", arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
void preTune(){ arg.rngstate.backup(); }
void postTune(){ arg.rngstate.restore(); }
long long flops() const {
return 0;
} // Only correct if there is no link reconstruction, no cub reduction accounted also
long long bytes() const {
return 0;
} //no accounting the reduction!!!!
};
template<typename Float, int NCOLORS, typename Gauge>
void InitGaugeField( Gauge dataOr, cudaGaugeField& data, RNG &rngstate) {
InitGaugeHotArg<Gauge> initarg(dataOr, data, rngstate);
InitGaugeHot<Float, Gauge, NCOLORS> init(initarg);
init.apply(0);
checkCudaError();
qudaDeviceSynchronize();
data.exchangeExtendedGhost(data.R(),false);
}
template<typename Float>
void InitGaugeField( cudaGaugeField& data, RNG &rngstate) {
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data, rngstate);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data, rngstate);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data, rngstate);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
  /** @brief Perform a hot start of the gauge field: every link is set to a random SU(3) matrix followed by reunitarization; in the multi-GPU case the border links are also exchanged.
*
* @param[in,out] data Gauge field
* @param[in,out] rngstate state of the CURAND random number generator
*/
void InitGaugeField( cudaGaugeField& data, RNG &rngstate) {
#ifdef GPU_GAUGE_ALG
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
InitGaugeField<float> (data, rngstate);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
InitGaugeField<double>(data, rngstate);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Pure gauge code has not been built");
#endif
}
}
| 3a01daf0544d481768109b534d43380fc7edba5f.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <comm_quda.h>
#include <unitarization_links.h>
#include <pgauge_monte.h>
#include <random_quda.h>
#include <cub_helper.cuh>
#include <index_helper.cuh>
#ifndef PI
#define PI 3.1415926535897932384626433832795 // pi
#endif
#ifndef PII
#define PII 6.2831853071795864769252867665590 // 2 * pi
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
template <typename Gauge>
struct InitGaugeColdArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Gauge dataOr;
InitGaugeColdArg(const Gauge &dataOr, const cudaGaugeField &data)
: dataOr(dataOr) {
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
}
};
template<typename Float, typename Gauge, int NCOLORS>
__global__ void compute_InitGauge_ColdStart(InitGaugeColdArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= arg.threads ) return;
int parity = 0;
if ( idx >= arg.threads / 2 ) {
parity = 1;
idx -= arg.threads / 2;
}
Matrix<complex<Float>,NCOLORS> U;
setIdentity(&U);
for ( int d = 0; d < 4; d++ )
arg.dataOr.save((Float*)(U.data),idx, d, parity);
}
template<typename Float, typename Gauge, int NCOLORS>
class InitGaugeCold : Tunable {
InitGaugeColdArg<Gauge> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
InitGaugeCold(InitGaugeColdArg<Gauge> &arg)
: arg(arg) {
}
~InitGaugeCold () {
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
compute_InitGauge_ColdStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg);
//cudaDeviceSynchronize();
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
long long flops() const {
return 0;
} // Only correct if there is no link reconstruction, no cub reduction accounted also
long long bytes() const {
return 0;
} //no accounting the reduction!!!!
};
template<typename Float, int NCOLORS, typename Gauge>
void InitGaugeField( Gauge dataOr, cudaGaugeField& data) {
InitGaugeColdArg<Gauge> initarg(dataOr, data);
InitGaugeCold<Float, Gauge, NCOLORS> init(initarg);
init.apply(0);
checkCudaError();
}
template<typename Float>
void InitGaugeField( cudaGaugeField& data) {
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
/** @brief Perform a cold start to the gauge field, identity SU(3) matrix, also fills the ghost links in multi-GPU case (no need to exchange data)
*
* @param[in,out] data Gauge field
*/
void InitGaugeField( cudaGaugeField& data) {
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
InitGaugeField<float> (data);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
InitGaugeField<double>(data);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
}
template <typename Gauge>
struct InitGaugeHotArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
RNG rngstate;
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
InitGaugeHotArg(const Gauge &dataOr, const cudaGaugeField &data, RNG &rngstate)
: dataOr(dataOr), rngstate(rngstate) {
#ifdef MULTI_GPU
for ( int dir = 0; dir < 4; ++dir ) {
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir] * 2;
}
#else
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
#endif
//the optimal number of RNG states in rngstate array must be equal to half the lattice volume
//this number is the same used in heatbath...
threads = X[0] * X[1] * X[2] * X[3] >> 1;
}
};
template <typename Float>
__host__ __device__ static inline void reunit_link( Matrix<complex<Float>,3> &U ){
complex<Float> t2((Float)0.0, (Float)0.0);
Float t1 = 0.0;
//first normalize first row
//sum of squares of row
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += norm(U(0,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(0,c) *= t1;
//6
#pragma unroll
for ( int c = 0; c < 3; c++ ) t2 += conj(U(0,c)) * U(1,c);
//24
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c);
//24
//normalize second row
//sum of squares of row
t1 = 0.0;
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += norm(U(1,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1, c) *= t1;
//6
    //Reconstruct last row
U(2,0) = conj(U(0,1) * U(1,2) - U(0,2) * U(1,1));
U(2,1) = conj(U(0,2) * U(1,0) - U(0,0) * U(1,2));
U(2,2) = conj(U(0,0) * U(1,1) - U(0,1) * U(1,0));
//42
//T=130
}
/**
@brief Generate the four random real elements of the SU(2) matrix
@param localstate CURAND rng state
@return four real numbers of the SU(2) matrix
*/
template <class T>
__device__ static inline Matrix<T,2> randomSU2(cuRNGState& localState){
Matrix<T,2> a;
T aabs, ctheta, stheta, phi;
a(0,0) = Random<T>(localState, (T)-1.0, (T)1.0);
aabs = sqrt( 1.0 - a(0,0) * a(0,0));
ctheta = Random<T>(localState, (T)-1.0, (T)1.0);
phi = PII * Random<T>(localState);
stheta = ( curand(&localState) & 1 ? 1 : -1 ) * sqrt( (T)1.0 - ctheta * ctheta );
a(0,1) = aabs * stheta * cos( phi );
a(1,0) = aabs * stheta * sin( phi );
a(1,1) = aabs * ctheta;
return a;
}
/**
@brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link
@param u SU(2) matrix represented by four real numbers
@param link SU(Nc) matrix
@param id indices
*/
template <class T, int NCOLORS>
__host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<complex<T>,NCOLORS> &link, int2 id ){
for ( int j = 0; j < NCOLORS; j++ ) {
complex<T> tmp = complex<T>( u(0,0), u(1,1) ) * link(id.x, j) + complex<T>( u(1,0), u(0,1) ) * link(id.y, j);
link(id.y, j) = complex<T>(-u(1,0), u(0,1) ) * link(id.x, j) + complex<T>( u(0,0),-u(1,1) ) * link(id.y, j);
link(id.x, j) = tmp;
}
}
/**
@brief Calculate the SU(2) index block in the SU(Nc) matrix
    @param block number to calculate the indices for; the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2.
    @return Returns the two indices in an int2, accessed by .x and .y.
*/
template<int NCOLORS>
__host__ __device__ static inline int2 IndexBlock(int block){
int2 id;
int i1;
int found = 0;
int del_i = 0;
int index = -1;
while ( del_i < (NCOLORS - 1) && found == 0 ) {
del_i++;
for ( i1 = 0; i1 < (NCOLORS - del_i); i1++ ) {
index++;
if ( index == block ) {
found = 1;
break;
}
}
}
id.y = i1 + del_i;
id.x = i1;
return id;
}
/**
@brief Generate a SU(Nc) random matrix
@param localstate CURAND rng state
@return SU(Nc) matrix
*/
template <class Float, int NCOLORS>
__device__ inline Matrix<complex<Float>,NCOLORS> randomize( cuRNGState& localState ){
Matrix<complex<Float>,NCOLORS> U;
for ( int i = 0; i < NCOLORS; i++ )
for ( int j = 0; j < NCOLORS; j++ )
U(i,j) = complex<Float>( (Float)(Random<Float>(localState) - 0.5), (Float)(Random<Float>(localState) - 0.5) );
reunit_link<Float>(U);
return U;
/*setIdentity(&U);
for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) {
Matrix<Float,2> rr = randomSU2<Float>(localState);
int2 id = IndexBlock<NCOLORS>( block );
mul_block_sun<Float, NCOLORS>(rr, U, id);
//U = block_su2_to_su3<Float>( U, a00, a01, a10, a11, block );
}
return U;*/
}
template<typename Float, typename Gauge, int NCOLORS>
__global__ void compute_InitGauge_HotStart(InitGaugeHotArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= arg.threads ) return;
#ifdef MULTI_GPU
int X[4], x[4];
for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
for ( int dr = 0; dr < 4; ++dr ) X[dr] += 2 * arg.border[dr];
int id = idx;
cuRNGState localState = arg.rngstate.State()[ id ];
#else
cuRNGState localState = arg.rngstate.State()[ idx ];
#endif
for ( int parity = 0; parity < 2; parity++ ) {
#ifdef MULTI_GPU
getCoords(x, id, arg.X, parity);
for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr];
idx = linkIndex(x,X);
#endif
for ( int d = 0; d < 4; d++ ) {
Matrix<complex<Float>,NCOLORS> U;
U = randomize<Float, NCOLORS>(localState);
arg.dataOr.save((Float*)(U.data),idx, d, parity);
}
}
#ifdef MULTI_GPU
arg.rngstate.State()[ id ] = localState;
#else
arg.rngstate.State()[ idx ] = localState;
#endif
}
template<typename Float, typename Gauge, int NCOLORS>
class InitGaugeHot : Tunable {
InitGaugeHotArg<Gauge> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
InitGaugeHot(InitGaugeHotArg<Gauge> &arg)
: arg(arg) {
}
~InitGaugeHot () {
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
compute_InitGauge_HotStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg);
//cudaDeviceSynchronize();
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
    sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
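    // Back up / restore the CURAND state around autotuning so trial launches do not advance the random sequence.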
void preTune(){ arg.rngstate.backup(); }
void postTune(){ arg.rngstate.restore(); }
long long flops() const {
return 0;
} // Only correct if there is no link reconstruction, no cub reduction accounted also
long long bytes() const {
return 0;
} //no accounting the reduction!!!!
};
template<typename Float, int NCOLORS, typename Gauge>
void InitGaugeField( Gauge dataOr, cudaGaugeField& data, RNG &rngstate) {
InitGaugeHotArg<Gauge> initarg(dataOr, data, rngstate);
InitGaugeHot<Float, Gauge, NCOLORS> init(initarg);
init.apply(0);
checkCudaError();
qudaDeviceSynchronize();
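    // Fill the extended border/ghost links so neighboring GPUs see the freshly generated gauge field.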
data.exchangeExtendedGhost(data.R(),false);
}
template<typename Float>
void InitGaugeField( cudaGaugeField& data, RNG &rngstate) {
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data, rngstate);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data, rngstate);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
InitGaugeField<Float, 3>(Gauge(data), data, rngstate);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/** @brief Perform a hot start to the gauge field: random SU(3) matrices followed by reunitarization; border links are also exchanged in the multi-GPU case.
*
* @param[in,out] data Gauge field
* @param[in,out] rngstate state of the CURAND random number generator
*/
void InitGaugeField( cudaGaugeField& data, RNG &rngstate) {
#ifdef GPU_GAUGE_ALG
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
InitGaugeField<float> (data, rngstate);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
InitGaugeField<double>(data, rngstate);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Pure gauge code has not been built");
#endif
}
}
|
ecd6b12e19ea3eecf73701c9a93336ec50955c26.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include <iostream>
#include <string>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <util.cuh>
using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID, bool fix_nan) : Thread(false), _deviceID(deviceID), _data(NULL) {
try {
int numLayers = PyList_GET_SIZE(layerParams);
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
string layerType = pyDictGetString(paramsDict, "type");
Layer* l = initLayer(layerType, paramsDict, fix_nan);
// Connect backward links in graph for this layer
intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
if (inputLayers != NULL) {
for (int i = 0; i < inputLayers->size(); i++) {
l->addPrev(&getLayer(inputLayers->at(i)));
}
}
delete inputLayers;
}
// Connect the forward links in the graph
for (int i = 0; i < _layers.size(); i++) {
vector<Layer*>& prev = _layers[i]->getPrev();
for (int j = 0; j < prev.size(); j++) {
prev[j]->addNext(_layers[i]);
}
}
// Execute post-initialization stuff
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->postInit();
}
_dp = new DataProvider(minibatchSize);
} catch (string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
/*
* Override this in derived classes
*/
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict, bool fix_nan) {
if (layerType == "fc") {
_layers.push_back(new FCLayer(this, paramsDict));
} else if (layerType == "conv") {
_layers.push_back(new ConvLayer(this, paramsDict));
} else if (layerType == "local") {
_layers.push_back(new LocalUnsharedLayer(this, paramsDict));
} else if (layerType == "pool") {
_layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict));
} else if (layerType == "rnorm") {
_layers.push_back(new ResponseNormLayer(this, paramsDict));
} else if (layerType == "cmrnorm") {
_layers.push_back(new CrossMapResponseNormLayer(this, paramsDict));
} else if (layerType == "cnorm") {
_layers.push_back(new ContrastNormLayer(this, paramsDict));
} else if (layerType == "softmax") {
_layers.push_back(new SoftmaxLayer(this, paramsDict));
} else if (layerType == "l2svm") {
_layers.push_back(new L2SVMLayer(this, paramsDict));
} else if (layerType == "eltsum") {
_layers.push_back(new EltwiseSumLayer(this, paramsDict));
} else if (layerType == "eltmax") {
_layers.push_back(new EltwiseMaxLayer(this, paramsDict));
} else if (layerType == "eltabsmax") {
_layers.push_back(new EltwiseAbsMaxLayer(this, paramsDict));
} else if (layerType == "mavg") {
_layers.push_back(new MAvgPoolLayer(this, paramsDict));
} else if (layerType == "mmax") {
_layers.push_back(new MMaxPoolLayer(this, paramsDict));
} else if (layerType == "mconv") {
_layers.push_back(new MicroConvLayer(this, paramsDict));
} else if (layerType == "dshrink") {
_layers.push_back(new DShrinkLayer(this, paramsDict));
} else if (layerType == "eltfunc") {
_layers.push_back(new EltwiseFuncLayer(this, paramsDict));
} else if (layerType == "eltdfunc") {
_layers.push_back(new EltwiseDFuncLayer(this, paramsDict));
} else if (layerType == "vfunc") {
_layers.push_back(new VectFuncLayer(this, paramsDict));
} else if (layerType == "neuron") {
_layers.push_back(new NeuronLayer(this, paramsDict));
} else if (layerType == "nailbed") {
_layers.push_back(new NailbedLayer(this, paramsDict));
} else if (layerType == "blur") {
_layers.push_back(new GaussianBlurLayer(this, paramsDict));
} else if (layerType == "resize") {
_layers.push_back(new ResizeLayer(this, paramsDict));
} else if (layerType == "rgb2yuv") {
_layers.push_back(new RGBToYUVLayer(this, paramsDict));
} else if (layerType == "rgb2lab") {
_layers.push_back(new RGBToLABLayer(this, paramsDict));
} else if (layerType == "data") {
DataLayer *d = new DataLayer(this, paramsDict);
_layers.push_back(d);
_dataLayers.push_back(d);
} else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict);
_layers.push_back(c);
_costs.push_back(c);
} else {
throw string("Unknown layer type ") + layerType;
}
_layers.back()->SetNan2Zero(fix_nan);
return _layers.back();
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
void ConvNet::initCuda() {
int randomSeed = time(0);
char* randomSeedEnv;
    //debug: force a fixed random seed (7) so runs are reproducible
srand(7);
randomSeed = 7;
hipSetDevice(_deviceID < 0 ? cutGetMaxGflopsDeviceId() : _deviceID);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipblasInit();
randomSeedEnv = getenv("CONVNET_RANDOM_SEED");
if (randomSeedEnv != NULL) {
randomSeed = atoi(randomSeedEnv);
}
NVMatrix::initRandom(randomSeed);
copyToGPU();
}
void* ConvNet::run() {
initCuda();
while (true) {
Worker* worker = _workerQueue.dequeue();
worker->run();
delete worker;
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::operator[](int idx) {
return *_layers[idx];
}
Layer& ConvNet::getLayer(int idx) {
return *_layers[idx];
}
void ConvNet::copyToCPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToCPU();
}
}
void ConvNet::copyToGPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToGPU();
}
}
void ConvNet::updateWeights(bool useAux) {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->updateWeights(useAux);
}
}
void ConvNet::rollbackWeights(float reduceScale) {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->rollbackWeights(reduceScale);
}
}
void ConvNet::procAuxWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->procAuxWeights();
}
}
void ConvNet::zeroAuxWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->zeroAuxWeights();
}
}
void ConvNet::reset() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->reset();
}
}
int ConvNet::getNumLayers() {
return _layers.size();
}
double ConvNet::getErrorNum()
{
return _costs[0]->getErrorNum();
}
int ConvNet::getNumCases()
{
return _data->getNumCases();
}
void ConvNet::setEpoch(int epoch) {
_epoch = epoch;
}
int ConvNet::getEpoch() {
return _epoch;
}
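// Backpropagate from every cost layer, then reset all layer activation state for the next pass.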
void ConvNet::bprop(PASS_TYPE passType) {
for (int i = 0; i < _costs.size(); i++) {
_costs[i]->bprop(passType);
}
reset();
}
void ConvNet::fprop(PASS_TYPE passType) {
assert(_data != NULL);
reset();
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(_data->getData(), passType);
}
}
void ConvNet::setParam(float eps_scale)
{
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->setParam(eps_scale);
}
}
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
if (&data != _data) {
delete _data;
}
_data = &data;
fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
delete _data;
_data = &_dp->getMinibatch(miniIdx);
fprop(passType);
}
void ConvNet::fpropRnd(int miniIdx, int prime_ind, PASS_TYPE passType)
{
delete _data;
_data = &_dp->getMinibatchRnd(miniIdx, prime_ind);
fprop(passType);
};
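// getCost() returns a heap-allocated Cost; callers (see getCost(Cost&) and getCostValue()) are responsible for deleting it.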
Cost& ConvNet::getCost() {
return *new Cost(_data->getNumCases(), _costs);
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost& newCost = getCost();
cost += newCost;
delete &newCost;
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
fprop(0, PASS_GC);
_baseErr = getCostValue();
bprop(PASS_GC);
for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
(*it)->checkGradients();
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
weights.getW().copyFromHost(weightsCPU);
weightsCPU(i,j) = v;
fprop(PASS_GC);
double err = getCostValue();
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan_host(numGrad(i,j)) || isinf_host(numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
weights.getW().copyFromHost(weightsCPU);
}
}
Matrix gradCPU;
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(6,4);
cout << "Numeric:" << endl;
numGrad.print(6,4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
| ecd6b12e19ea3eecf73701c9a93336ec50955c26.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include <iostream>
#include <string>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <util.cuh>
using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID, bool fix_nan) : Thread(false), _deviceID(deviceID), _data(NULL) {
try {
int numLayers = PyList_GET_SIZE(layerParams);
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
string layerType = pyDictGetString(paramsDict, "type");
Layer* l = initLayer(layerType, paramsDict, fix_nan);
// Connect backward links in graph for this layer
intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
if (inputLayers != NULL) {
for (int i = 0; i < inputLayers->size(); i++) {
l->addPrev(&getLayer(inputLayers->at(i)));
}
}
delete inputLayers;
}
// Connect the forward links in the graph
for (int i = 0; i < _layers.size(); i++) {
vector<Layer*>& prev = _layers[i]->getPrev();
for (int j = 0; j < prev.size(); j++) {
prev[j]->addNext(_layers[i]);
}
}
// Execute post-initialization stuff
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->postInit();
}
_dp = new DataProvider(minibatchSize);
} catch (string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
/*
* Override this in derived classes
*/
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict, bool fix_nan) {
if (layerType == "fc") {
_layers.push_back(new FCLayer(this, paramsDict));
} else if (layerType == "conv") {
_layers.push_back(new ConvLayer(this, paramsDict));
} else if (layerType == "local") {
_layers.push_back(new LocalUnsharedLayer(this, paramsDict));
} else if (layerType == "pool") {
_layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict));
} else if (layerType == "rnorm") {
_layers.push_back(new ResponseNormLayer(this, paramsDict));
} else if (layerType == "cmrnorm") {
_layers.push_back(new CrossMapResponseNormLayer(this, paramsDict));
} else if (layerType == "cnorm") {
_layers.push_back(new ContrastNormLayer(this, paramsDict));
} else if (layerType == "softmax") {
_layers.push_back(new SoftmaxLayer(this, paramsDict));
} else if (layerType == "l2svm") {
_layers.push_back(new L2SVMLayer(this, paramsDict));
} else if (layerType == "eltsum") {
_layers.push_back(new EltwiseSumLayer(this, paramsDict));
} else if (layerType == "eltmax") {
_layers.push_back(new EltwiseMaxLayer(this, paramsDict));
} else if (layerType == "eltabsmax") {
_layers.push_back(new EltwiseAbsMaxLayer(this, paramsDict));
} else if (layerType == "mavg") {
_layers.push_back(new MAvgPoolLayer(this, paramsDict));
} else if (layerType == "mmax") {
_layers.push_back(new MMaxPoolLayer(this, paramsDict));
} else if (layerType == "mconv") {
_layers.push_back(new MicroConvLayer(this, paramsDict));
} else if (layerType == "dshrink") {
_layers.push_back(new DShrinkLayer(this, paramsDict));
} else if (layerType == "eltfunc") {
_layers.push_back(new EltwiseFuncLayer(this, paramsDict));
} else if (layerType == "eltdfunc") {
_layers.push_back(new EltwiseDFuncLayer(this, paramsDict));
} else if (layerType == "vfunc") {
_layers.push_back(new VectFuncLayer(this, paramsDict));
} else if (layerType == "neuron") {
_layers.push_back(new NeuronLayer(this, paramsDict));
} else if (layerType == "nailbed") {
_layers.push_back(new NailbedLayer(this, paramsDict));
} else if (layerType == "blur") {
_layers.push_back(new GaussianBlurLayer(this, paramsDict));
} else if (layerType == "resize") {
_layers.push_back(new ResizeLayer(this, paramsDict));
} else if (layerType == "rgb2yuv") {
_layers.push_back(new RGBToYUVLayer(this, paramsDict));
} else if (layerType == "rgb2lab") {
_layers.push_back(new RGBToLABLayer(this, paramsDict));
} else if (layerType == "data") {
DataLayer *d = new DataLayer(this, paramsDict);
_layers.push_back(d);
_dataLayers.push_back(d);
} else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict);
_layers.push_back(c);
_costs.push_back(c);
} else {
throw string("Unknown layer type ") + layerType;
}
_layers.back()->SetNan2Zero(fix_nan);
return _layers.back();
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
void ConvNet::initCuda() {
int randomSeed = time(0);
char* randomSeedEnv;
    //debug: force a fixed random seed (7) so runs are reproducible
srand(7);
randomSeed = 7;
cudaSetDevice(_deviceID < 0 ? cutGetMaxGflopsDeviceId() : _deviceID);
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cublasInit();
randomSeedEnv = getenv("CONVNET_RANDOM_SEED");
if (randomSeedEnv != NULL) {
randomSeed = atoi(randomSeedEnv);
}
NVMatrix::initRandom(randomSeed);
copyToGPU();
}
void* ConvNet::run() {
initCuda();
while (true) {
Worker* worker = _workerQueue.dequeue();
worker->run();
delete worker;
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::operator[](int idx) {
return *_layers[idx];
}
Layer& ConvNet::getLayer(int idx) {
return *_layers[idx];
}
void ConvNet::copyToCPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToCPU();
}
}
void ConvNet::copyToGPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToGPU();
}
}
void ConvNet::updateWeights(bool useAux) {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->updateWeights(useAux);
}
}
void ConvNet::rollbackWeights(float reduceScale) {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->rollbackWeights(reduceScale);
}
}
void ConvNet::procAuxWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->procAuxWeights();
}
}
void ConvNet::zeroAuxWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->zeroAuxWeights();
}
}
void ConvNet::reset() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->reset();
}
}
int ConvNet::getNumLayers() {
return _layers.size();
}
double ConvNet::getErrorNum()
{
return _costs[0]->getErrorNum();
}
int ConvNet::getNumCases()
{
return _data->getNumCases();
}
void ConvNet::setEpoch(int epoch) {
_epoch = epoch;
}
int ConvNet::getEpoch() {
return _epoch;
}
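// Backpropagate from every cost layer, then reset all layer activation state for the next pass.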
void ConvNet::bprop(PASS_TYPE passType) {
for (int i = 0; i < _costs.size(); i++) {
_costs[i]->bprop(passType);
}
reset();
}
void ConvNet::fprop(PASS_TYPE passType) {
assert(_data != NULL);
reset();
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(_data->getData(), passType);
}
}
void ConvNet::setParam(float eps_scale)
{
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->setParam(eps_scale);
}
}
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
if (&data != _data) {
delete _data;
}
_data = &data;
fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
delete _data;
_data = &_dp->getMinibatch(miniIdx);
fprop(passType);
}
void ConvNet::fpropRnd(int miniIdx, int prime_ind, PASS_TYPE passType)
{
delete _data;
_data = &_dp->getMinibatchRnd(miniIdx, prime_ind);
fprop(passType);
};
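// getCost() returns a heap-allocated Cost; callers (see getCost(Cost&) and getCostValue()) are responsible for deleting it.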
Cost& ConvNet::getCost() {
return *new Cost(_data->getNumCases(), _costs);
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost& newCost = getCost();
cost += newCost;
delete &newCost;
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
fprop(0, PASS_GC);
_baseErr = getCostValue();
bprop(PASS_GC);
for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
(*it)->checkGradients();
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
weights.getW().copyFromHost(weightsCPU);
weightsCPU(i,j) = v;
fprop(PASS_GC);
double err = getCostValue();
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan_host(numGrad(i,j)) || isinf_host(numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
weights.getW().copyFromHost(weightsCPU);
}
}
Matrix gradCPU;
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(6,4);
cout << "Numeric:" << endl;
numGrad.print(6,4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
|
165319e361f56b3e76252554a0a7d801c670a3d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <algorithm>
#include <list>
#include <queue>
#include <tuple>
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
#include "utils/warp_reduce.cuh"
// ****************************************************************************
// * PointEdgeDistance *
// ****************************************************************************
__global__ void PointEdgeForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_points, // (P,)
int64_t* __restrict__ idx_points, // (P,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segments in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x; // thread idx
// Each block will compute one element of the output idx_points[startp + i],
// dist_points[startp + i]. Within the block we will use threads to compute
// the distances between points[startp + i] and segms[j] for all j belonging
// in the same batch as i, i.e. j in [starts, ends]. Then use a block
// reduction to take an argmin of the distances.
// If i exceeds the number of points in batch_idx, then do nothing
if (i < (endp - startp)) {
// Retrieve (startp + i) point
const float3 p_f3 = points_f3[startp + i];
// Compute the distances between points[startp + i] and segms[j] for
// all j belonging in the same batch as i, i.e. j in [starts, ends].
// Here each thread will reduce over (ends-starts) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (ends - starts); j += blockDim.x) {
const float3 v0 = segms_f3[(starts + j) * 2 + 0];
const float3 v1 = segms_f3[(starts + j) * 2 + 1];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (starts + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_points[startp + i] = min_idxs[0];
dist_points[startp + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_points) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "PointEdgeDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({P,}, points.options());
at::Tensor idxs = at::zeros({P,}, points_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
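  // Launch one block per (point, batch element) pair: blockIdx.x indexes the point, blockIdx.y the batch element;
  // shared memory holds one candidate distance and index per thread for the block-wide argmin.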
const int threads = 128;
const dim3 blocks(max_points, B);
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
hipLaunchKernelGGL(( PointEdgeForwardKernel), dim3(blocks), dim3(threads), shared_size, stream,
points.contiguous().data_ptr<float>(),
points_first_idx.contiguous().data_ptr<int64_t>(),
segms.contiguous().data_ptr<float>(),
segms_first_idx.contiguous().data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void PointEdgeBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_points, // (P,)
const float* __restrict__ grad_dists, // (P,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
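  // Grid-stride loop over points; gradients are scattered with atomicAdd since several points may pick the same nearest segment.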
for (size_t p = tid; p < P; p += stride) {
const float3 p_f3 = points_f3[p];
const int64_t sidx = idx_points[p];
const float3 v0 = segms_f3[sidx * 2 + 0];
const float3 v1 = segms_f3[sidx * 2 + 1];
const float grad_dist = grad_dists[p];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_points,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_points_t{idx_points, "idx_points", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "PointEdgeDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_points.size(0) == P);
TORCH_CHECK(grad_dists.size(0) == P);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const int blocks = 64;
const int threads = 512;
hipLaunchKernelGGL(( PointEdgeBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
idx_points.contiguous().data_ptr<int64_t>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * EdgePointDistance *
// ****************************************************************************
__global__ void EdgePointForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_segms, // (S,)
int64_t* __restrict__ idx_segms, // (S,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch_idx
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segms in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x; // thread index
// Each block will compute one element of the output idx_segms[starts + i],
// dist_segms[starts + i]. Within the block we will use threads to compute
// the distances between segms[starts + i] and points[j] for all j belonging
// in the same batch as i, i.e. j in [startp, endp]. Then use a block
// reduction to take an argmin of the distances.
// If i exceeds the number of segms in batch_idx, then do nothing
if (i < (ends - starts)) {
const float3 v0 = segms_f3[(starts + i) * 2 + 0];
const float3 v1 = segms_f3[(starts + i) * 2 + 1];
// Compute the distances between segms[starts + i] and points[j] for
// all j belonging in the same batch as i, i.e. j in [startp, endp].
// Here each thread will reduce over (endp-startp) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (endp - startp); j += blockDim.x) {
// Retrieve (startp + i) point
const float3 p_f3 = points_f3[startp + j];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (startp + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_segms[starts + i] = min_idxs[0];
dist_segms[starts + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "EdgePointDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({S,}, segms.options());
at::Tensor idxs = at::zeros({S,}, segms_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
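  // Launch one block per (segment, batch element) pair: blockIdx.x indexes the segment, blockIdx.y the batch element;
  // the block then reduces over the points of that batch element.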
const int threads = 128;
const dim3 blocks(max_segms, B);
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
hipLaunchKernelGGL(( EdgePointForwardKernel), dim3(blocks), dim3(threads), shared_size, stream,
points.contiguous().data_ptr<float>(),
points_first_idx.contiguous().data_ptr<int64_t>(),
segms.contiguous().data_ptr<float>(),
segms_first_idx.contiguous().data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void EdgePointBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_segms, // (S,)
const float* __restrict__ grad_dists, // (S,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
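  // Grid-stride loop over segments; gradients are scattered with atomicAdd since several segments may pick the same nearest point.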
for (size_t s = tid; s < S; s += stride) {
const float3 v0 = segms_f3[s * 2 + 0];
const float3 v1 = segms_f3[s * 2 + 1];
const int64_t pidx = idx_segms[s];
const float3 p_f3 = points_f3[pidx];
const float grad_dist = grad_dists[s];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + pidx * 3 + 0, grad_point.x);
atomicAdd(grad_points + pidx * 3 + 1, grad_point.y);
atomicAdd(grad_points + pidx * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_segms_t{idx_segms, "idx_segms", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "PointEdgeDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_segms_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_segms.size(0) == S);
TORCH_CHECK(grad_dists.size(0) == S);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
const int blocks = 64;
const int threads = 512;
hipLaunchKernelGGL(( EdgePointBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
idx_segms.contiguous().data_ptr<int64_t>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
S);
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * PointEdgeArrayDistance *
// ****************************************************************************
__global__ void PointEdgeArrayForwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
float* __restrict__ dists, // (P, S)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
float3 a = segms_f3[s * 2 + 0];
float3 b = segms_f3[s * 2 + 1];
float3 point = points_f3[p];
float dist = PointLine3DistanceForward(point, a, b);
dists[p * S + s] = dist;
}
}
at::Tensor PointEdgeArrayDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2};
at::CheckedFrom c = "PointEdgeArrayDistanceForwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
at::Tensor dists = at::zeros({P, S}, points.options());
if (dists.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return dists;
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( PointEdgeArrayForwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
dists.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return dists;
}
__global__ void PointEdgeArrayBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const float* __restrict__ grad_dists, // (P, S)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
const float3 a = segms_f3[s * 2 + 0];
const float3 b = segms_f3[s * 2 + 1];
const float3 point = points_f3[p];
const float grad_dist = grad_dists[p * S + s];
const auto grads = PointLine3DistanceBackward(point, a, b, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_a = thrust::get<1>(grads);
const float3 grad_b = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_a.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_a.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_a.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_b.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_b.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_b.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeArrayDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2},
grad_dists_t{grad_dists, "grad_dists", 3};
at::CheckedFrom c = "PointEdgeArrayDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK((grad_dists.size(0) == P) && (grad_dists.size(1) == S));
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( PointEdgeArrayBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
| 165319e361f56b3e76252554a0a7d801c670a3d0.cu | // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <algorithm>
#include <list>
#include <queue>
#include <tuple>
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
#include "utils/warp_reduce.cuh"
// ****************************************************************************
// * PointEdgeDistance *
// ****************************************************************************
__global__ void PointEdgeForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_points, // (P,)
int64_t* __restrict__ idx_points, // (P,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segments in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x; // thread idx
// Each block will compute one element of the output idx_points[startp + i],
// dist_points[startp + i]. Within the block we will use threads to compute
// the distances between points[startp + i] and segms[j] for all j belonging
// in the same batch as i, i.e. j in [starts, ends]. Then use a block
// reduction to take an argmin of the distances.
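// Illustrative sizing (assumed numbers): with the 128 threads per block used by
// the launcher and a batch of roughly 1000 segments, each thread serially reduces
// about 8 candidate segments before the shared-memory argmin below.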
// If i exceeds the number of points in batch_idx, then do nothing
if (i < (endp - startp)) {
// Retrieve (startp + i) point
const float3 p_f3 = points_f3[startp + i];
// Compute the distances between points[startp + i] and segms[j] for
// all j belonging in the same batch as i, i.e. j in [starts, ends].
// Here each thread will reduce over (ends-starts) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (ends - starts); j += blockDim.x) {
const float3 v0 = segms_f3[(starts + j) * 2 + 0];
const float3 v1 = segms_f3[(starts + j) * 2 + 1];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (starts + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
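// With blockDim.x = 128 this loop executes only for s = 64; strides of 32 and
// below are finished warp-synchronously by WarpReduce further down.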
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_points[startp + i] = min_idxs[0];
dist_points[startp + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_points) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "PointEdgeDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({P,}, points.options());
at::Tensor idxs = at::zeros({P,}, points_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
const int threads = 128;
const dim3 blocks(max_points, B);
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
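// The kernel splits this buffer into float[threads] (min_dists) followed by
// int64_t[threads] (min_idxs); sizing the float part with sizeof(size_t)
// over-allocates on typical 64-bit builds, which is harmless.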
PointEdgeForwardKernel<<<blocks, threads, shared_size, stream>>>(
points.contiguous().data_ptr<float>(),
points_first_idx.contiguous().data_ptr<int64_t>(),
segms.contiguous().data_ptr<float>(),
segms_first_idx.contiguous().data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void PointEdgeBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_points, // (P,)
const float* __restrict__ grad_dists, // (P,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
for (size_t p = tid; p < P; p += stride) {
const float3 p_f3 = points_f3[p];
const int64_t sidx = idx_points[p];
const float3 v0 = segms_f3[sidx * 2 + 0];
const float3 v1 = segms_f3[sidx * 2 + 1];
const float grad_dist = grad_dists[p];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_points,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_points_t{idx_points, "idx_points", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "PointEdgeDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_points.size(0) == P);
TORCH_CHECK(grad_dists.size(0) == P);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const int blocks = 64;
const int threads = 512;
PointEdgeBackwardKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
idx_points.contiguous().data_ptr<int64_t>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * EdgePointDistance *
// ****************************************************************************
__global__ void EdgePointForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_segms, // (S,)
int64_t* __restrict__ idx_segms, // (S,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch_idx
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segms in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x; // thread index
// Each block will compute one element of the output idx_segms[starts + i],
// dist_segms[starts + i]. Within the block we will use threads to compute
// the distances between segms[starts + i] and points[j] for all j belonging
// in the same batch as i, i.e. j in [startp, endp]. Then use a block
// reduction to take an argmin of the distances.
// If i exceeds the number of segms in batch_idx, then do nothing
if (i < (ends - starts)) {
const float3 v0 = segms_f3[(starts + i) * 2 + 0];
const float3 v1 = segms_f3[(starts + i) * 2 + 1];
// Compute the distances between segms[starts + i] and points[j] for
// all j belonging in the same batch as i, i.e. j in [startp, endp].
// Here each thread will reduce over (endp-startp) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (endp - startp); j += blockDim.x) {
// Retrieve (startp + i) point
const float3 p_f3 = points_f3[startp + j];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (startp + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_segms[starts + i] = min_idxs[0];
dist_segms[starts + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "EdgePointDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({S,}, segms.options());
at::Tensor idxs = at::zeros({S,}, segms_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
const int threads = 128;
const dim3 blocks(max_segms, B);
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
EdgePointForwardKernel<<<blocks, threads, shared_size, stream>>>(
points.contiguous().data_ptr<float>(),
points_first_idx.contiguous().data_ptr<int64_t>(),
segms.contiguous().data_ptr<float>(),
segms_first_idx.contiguous().data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void EdgePointBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_segms, // (S,)
const float* __restrict__ grad_dists, // (S,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
for (size_t s = tid; s < S; s += stride) {
const float3 v0 = segms_f3[s * 2 + 0];
const float3 v1 = segms_f3[s * 2 + 1];
const int64_t pidx = idx_segms[s];
const float3 p_f3 = points_f3[pidx];
const float grad_dist = grad_dists[s];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + pidx * 3 + 0, grad_point.x);
atomicAdd(grad_points + pidx * 3 + 1, grad_point.y);
atomicAdd(grad_points + pidx * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_segms_t{idx_segms, "idx_segms", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "EdgePointDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_segms_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_segms.size(0) == S);
TORCH_CHECK(grad_dists.size(0) == S);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
const int blocks = 64;
const int threads = 512;
EdgePointBackwardKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
idx_segms.contiguous().data_ptr<int64_t>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
S);
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * PointEdgeArrayDistance *
// ****************************************************************************
__global__ void PointEdgeArrayForwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
float* __restrict__ dists, // (P, S)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
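// Decode the flat pair index (t_i = s * P + p): consecutive threads handle
// consecutive points of the same segment.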
float3 a = segms_f3[s * 2 + 0];
float3 b = segms_f3[s * 2 + 1];
float3 point = points_f3[p];
float dist = PointLine3DistanceForward(point, a, b);
dists[p * S + s] = dist;
}
}
at::Tensor PointEdgeArrayDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2};
at::CheckedFrom c = "PointEdgeArrayDistanceForwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
at::Tensor dists = at::zeros({P, S}, points.options());
if (dists.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return dists;
}
const size_t blocks = 1024;
const size_t threads = 64;
PointEdgeArrayForwardKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
dists.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return dists;
}
__global__ void PointEdgeArrayBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const float* __restrict__ grad_dists, // (P, S)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
const float3 a = segms_f3[s * 2 + 0];
const float3 b = segms_f3[s * 2 + 1];
const float3 point = points_f3[p];
const float grad_dist = grad_dists[p * S + s];
const auto grads = PointLine3DistanceBackward(point, a, b, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_a = thrust::get<1>(grads);
const float3 grad_b = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_a.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_a.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_a.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_b.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_b.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_b.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeArrayDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2},
grad_dists_t{grad_dists, "grad_dists", 3};
at::CheckedFrom c = "PointEdgeArrayDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK((grad_dists.size(0) == P) && (grad_dists.size(1) == S));
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const size_t blocks = 1024;
const size_t threads = 64;
PointEdgeArrayBackwardKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
segms.contiguous().data_ptr<float>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
|
7845532162fda24b37ae27c7e73168bbe635793b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#define WINDOWGATE_THREADS 128
__global__ void cunnx_WindowGate_updateOutput_kernel(
float *output, float *centroids, float *normalizedCentroids, float *outputIndice,
const float *input, const float *noise, int inputSize, int outputSize,
int outputWindowSize, float a, float b, int train)
{
__shared__ float buffer[WINDOWGATE_THREADS];
unsigned int tx = threadIdx.x;
unsigned int k = blockIdx.x;
const float *input_k = input + inputSize*k;
float *output_k = output + outputWindowSize*k;
// get coordinate of centroid
buffer[tx] = 0;
for (unsigned int i=tx; i<inputSize; i+=blockDim.x)
buffer[tx] += input_k[i]*(float)(i+1);
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
{
float centroid = buffer[0];
// make centroid a number between 0 and 1
centroid /= (float)(inputSize);
normalizedCentroids[k] = centroid;
if ( train )
{
centroid += noise[k];
centroid = fminf(fmaxf(0,centroid),1);
}
// align centroid to output
centroid *= (float)(outputSize);
float outputIdx = centroid - 0.5*(float)outputWindowSize;
// clip indices
outputIdx = fminf(outputIdx, outputSize-outputWindowSize+1);
outputIdx = fmaxf(outputIdx, 1);
outputIdx = ceilf(outputIdx);
// align centroid to outputWindow
centroid -= (outputIdx-1);
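// Worked example (illustrative numbers): outputSize = 100, outputWindowSize = 10
// and a normalized centroid of 0.5 give centroid = 50, outputIdx = ceil(45) = 45,
// and a window-relative centroid of 50 - 44 = 6.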
outputIndice[k] = (int)outputIdx;
centroids[k] = centroid;
buffer[0] = centroid;
}
__syncthreads();
float centroid = buffer[0];
// gaussian blur
for (int i=tx; i<outputWindowSize; i+=blockDim.x)
{
float x = (float)(i+1)-centroid;
output_k[i] = a*expf(x*x*b);
}
}
static int cunnx_WindowGate_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
int inputSize = luaT_getfieldcheckint(L, 1, "inputSize");
int outputSize = luaT_getfieldcheckint(L, 1, "outputSize");
int outputWindowSize = luaT_getfieldcheckint(L, 1, "outputWindowSize");
int batchSize = luaT_getfieldcheckint(L, 1, "batchSize");
int train = luaT_getfieldcheckboolean(L, 1, "train");
float a = (float)luaT_getfieldchecknumber(L, 1, "a");
float b = (float)luaT_getfieldchecknumber(L, 1, "b");
THCudaLongTensor *outputIndiceCuda = (THCudaLongTensor*)luaT_getfieldcheckudata(L, 1, "outputIndiceCuda", "torch.CudaLongTensor");
THLongTensor *outputIndice = (THLongTensor*)luaT_getfieldcheckudata(L, 1, "outputIndice", "torch.LongTensor");
THCudaTensor *centroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "centroid", "torch.CudaTensor");
THCudaTensor *normalizedCentroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "normalizedCentroid", "torch.CudaTensor");
THCudaTensor *noise = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "noise", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_output", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, input->size[1] == inputSize, 2, "invalid input size");
THCudaTensor_resize2d(state, output, batchSize, outputWindowSize);
THCudaLongTensor_resize1d(state, outputIndiceCuda, batchSize);
THLongTensor_resize1d(outputIndice, batchSize);
THCudaTensor_resize1d(state, centroid, batchSize);
THCudaTensor_resize1d(state, normalizedCentroid, batchSize);
/* call cuda kernel */
dim3 blocks(batchSize); // each cuda-block is an example
dim3 threads(WINDOWGATE_THREADS);
hipLaunchKernelGGL(( cunnx_WindowGate_updateOutput_kernel), dim3(blocks),dim3(threads), 0, 0,
THCudaTensor_data(state, output), THCudaTensor_data(state, centroid),
THCudaTensor_data(state, normalizedCentroid), (float *)THCudaLongTensor_data(state, outputIndiceCuda),
(const float*)THCudaTensor_data(state, input), (const float*)THCudaTensor_data(state, noise),
inputSize, outputSize, outputWindowSize, a, b, train
);
THLongTensor_copyCuda(state, outputIndice, outputIndiceCuda);
return 0;
}
__global__ void cunnx_WindowGate_updateGradInput_kernel(
float *gradInput, float *error, float* targetCentroids,
const float *centroids,const float *input, const float *outputIndice,
const float* output, const float* gradOutput,
int inputSize, int outputSize, int outputWindowSize,
float c, float d, float e, float lr)
{
__shared__ float buffer[WINDOWGATE_THREADS+1];
unsigned int tx = threadIdx.x;
unsigned int k = blockIdx.x;
const float *gradOutput_k = gradOutput + outputWindowSize*k;
const float *output_k = output + outputWindowSize*k;
const float *input_k = input + inputSize*k;
float *gradInput_k = gradInput + inputSize*k;
float centroid = centroids[k];
// get gradient of centroid
buffer[tx] = 0;
for (unsigned int i=tx; i<outputWindowSize; i+=blockDim.x)
{
buffer[tx] += gradOutput_k[i]*output_k[i]*((float)(i+1) - centroid);
}
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
{
int outputIdx = outputIndice[k];
float gradCentroid = buffer[0]*c;
centroid -= (lr*gradCentroid);
centroid += outputIdx-1;
centroid /= (float)(outputSize);
targetCentroids[k] = centroid;
buffer[WINDOWGATE_THREADS] = centroid*(float)(inputSize);
}
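// After the reduction, buffer[0] = sum_i gradOutput[i]*output[i]*((i+1) - centroid);
// scaled by c it is treated as the gradient w.r.t. the window centroid. Thread 0 has
// applied a learning-rate step, mapped the centroid back to the normalized range, and
// broadcast the corresponding input-space target centroid through shared memory.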
__syncthreads();
float targetCentroid = buffer[WINDOWGATE_THREADS];
buffer[tx] = 0;
// target is a gaussian blur
for (int i=tx; i<inputSize; i+=blockDim.x)
{
float target = (float)(i+1)-targetCentroid;
target = d*expf(target*target*e);
float input = input_k[i];
// dot product of logProbInput and probTarget (NLL)
buffer[tx] -= logf(input + 0.0000001)*target;
// grad input w.r.t. NLL
gradInput_k[i] = -target/(input + 0.0000001);
}
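// The reduction below therefore accumulates the cross-entropy (NLL)
//   error[k] = -sum_i target_i * log(input_i + eps)
// between the Gaussian target window and the input distribution, while
// gradInput holds its derivative -target_i / (input_i + eps).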
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
error[k] = buffer[tx];
}
static int cunnx_WindowGate_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
int inputSize = luaT_getfieldcheckint(L, 1, "inputSize");
int outputSize = luaT_getfieldcheckint(L, 1, "outputSize");
int outputWindowSize = luaT_getfieldcheckint(L, 1, "outputWindowSize");
int batchSize = luaT_getfieldcheckint(L, 1, "batchSize");
float c = (float)luaT_getfieldchecknumber(L, 1, "c");
float d = (float)luaT_getfieldchecknumber(L, 1, "d");
float e = (float)luaT_getfieldchecknumber(L, 1, "e");
float lr = (float)luaT_getfieldchecknumber(L, 1, "lr");
THCudaTensor *error = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "error", "torch.CudaTensor");
THCudaTensor *centroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "centroid", "torch.CudaTensor");
THCudaTensor *targetCentroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "targetCentroid", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_output", "torch.CudaTensor");
THCudaTensor *outputIndiceCuda = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "outputIndiceCuda", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, input->size[1] == inputSize, 2, "invalid input size");
THCudaTensor_resize2d(state, gradInput, batchSize, inputSize);
THCudaTensor_resize1d(state, error, batchSize);
THCudaTensor_resize1d(state, targetCentroid, batchSize);
/* call cuda kernel */
dim3 blocks(batchSize); // each cuda-block is an example
dim3 threads(WINDOWGATE_THREADS);
hipLaunchKernelGGL(( cunnx_WindowGate_updateGradInput_kernel), dim3(blocks),dim3(threads), 0, 0,
THCudaTensor_data(state, gradInput), THCudaTensor_data(state, error),
THCudaTensor_data(state, targetCentroid),
(const float*)THCudaTensor_data(state, centroid),
(const float*)THCudaTensor_data(state, input),
(const float*)THCudaTensor_data(state, outputIndiceCuda),
(const float*)THCudaTensor_data(state, output),
(const float*)THCudaTensor_data(state, gradOutput),
inputSize, outputSize, outputWindowSize, c, d, e, lr
);
return 1;
}
static const struct luaL_Reg cunnx_WindowGate__ [] = {
{"WindowGate_updateOutput", cunnx_WindowGate_updateOutput},
{"WindowGate_updateGradInput", cunnx_WindowGate_updateGradInput},
{NULL, NULL}
};
static void cunnx_WindowGate_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunnx_WindowGate__, "nn");
lua_pop(L,1);
}
| 7845532162fda24b37ae27c7e73168bbe635793b.cu | #include "utils.h"
#define WINDOWGATE_THREADS 128
__global__ void cunnx_WindowGate_updateOutput_kernel(
float *output, float *centroids, float *normalizedCentroids, float *outputIndice,
const float *input, const float *noise, int inputSize, int outputSize,
int outputWindowSize, float a, float b, int train)
{
__shared__ float buffer[WINDOWGATE_THREADS];
unsigned int tx = threadIdx.x;
unsigned int k = blockIdx.x;
const float *input_k = input + inputSize*k;
float *output_k = output + outputWindowSize*k;
// get coordinate of centroid
buffer[tx] = 0;
for (unsigned int i=tx; i<inputSize; i+=blockDim.x)
buffer[tx] += input_k[i]*(float)(i+1);
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
{
float centroid = buffer[0];
// make centroid a number between 0 and 1
centroid /= (float)(inputSize);
normalizedCentroids[k] = centroid;
if ( train )
{
centroid += noise[k];
centroid = fminf(fmaxf(0,centroid),1);
}
// align centroid to output
centroid *= (float)(outputSize);
float outputIdx = centroid - 0.5*(float)outputWindowSize;
// clip indices
outputIdx = fminf(outputIdx, outputSize-outputWindowSize+1);
outputIdx = fmaxf(outputIdx, 1);
outputIdx = ceilf(outputIdx);
// align centroid to outputWindow
centroid -= (outputIdx-1);
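// Worked example (illustrative numbers): outputSize = 100, outputWindowSize = 10
// and a normalized centroid of 0.5 give centroid = 50, outputIdx = ceil(45) = 45,
// and a window-relative centroid of 50 - 44 = 6.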
outputIndice[k] = (int)outputIdx;
centroids[k] = centroid;
buffer[0] = centroid;
}
__syncthreads();
float centroid = buffer[0];
// gaussian blur
for (int i=tx; i<outputWindowSize; i+=blockDim.x)
{
float x = (float)(i+1)-centroid;
output_k[i] = a*expf(x*x*b);
}
}
static int cunnx_WindowGate_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
int inputSize = luaT_getfieldcheckint(L, 1, "inputSize");
int outputSize = luaT_getfieldcheckint(L, 1, "outputSize");
int outputWindowSize = luaT_getfieldcheckint(L, 1, "outputWindowSize");
int batchSize = luaT_getfieldcheckint(L, 1, "batchSize");
int train = luaT_getfieldcheckboolean(L, 1, "train");
float a = (float)luaT_getfieldchecknumber(L, 1, "a");
float b = (float)luaT_getfieldchecknumber(L, 1, "b");
THCudaLongTensor *outputIndiceCuda = (THCudaLongTensor*)luaT_getfieldcheckudata(L, 1, "outputIndiceCuda", "torch.CudaLongTensor");
THLongTensor *outputIndice = (THLongTensor*)luaT_getfieldcheckudata(L, 1, "outputIndice", "torch.LongTensor");
THCudaTensor *centroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "centroid", "torch.CudaTensor");
THCudaTensor *normalizedCentroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "normalizedCentroid", "torch.CudaTensor");
THCudaTensor *noise = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "noise", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_output", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, input->size[1] == inputSize, 2, "invalid input size");
THCudaTensor_resize2d(state, output, batchSize, outputWindowSize);
THCudaLongTensor_resize1d(state, outputIndiceCuda, batchSize);
THLongTensor_resize1d(outputIndice, batchSize);
THCudaTensor_resize1d(state, centroid, batchSize);
THCudaTensor_resize1d(state, normalizedCentroid, batchSize);
/* call cuda kernel */
dim3 blocks(batchSize); // each cuda-block is an example
dim3 threads(WINDOWGATE_THREADS);
cunnx_WindowGate_updateOutput_kernel<<<blocks,threads>>>(
THCudaTensor_data(state, output), THCudaTensor_data(state, centroid),
THCudaTensor_data(state, normalizedCentroid), (float *)THCudaLongTensor_data(state, outputIndiceCuda),
(const float*)THCudaTensor_data(state, input), (const float*)THCudaTensor_data(state, noise),
inputSize, outputSize, outputWindowSize, a, b, train
);
THLongTensor_copyCuda(state, outputIndice, outputIndiceCuda);
return 0;
}
__global__ void cunnx_WindowGate_updateGradInput_kernel(
float *gradInput, float *error, float* targetCentroids,
const float *centroids,const float *input, const float *outputIndice,
const float* output, const float* gradOutput,
int inputSize, int outputSize, int outputWindowSize,
float c, float d, float e, float lr)
{
__shared__ float buffer[WINDOWGATE_THREADS+1];
unsigned int tx = threadIdx.x;
unsigned int k = blockIdx.x;
const float *gradOutput_k = gradOutput + outputWindowSize*k;
const float *output_k = output + outputWindowSize*k;
const float *input_k = input + inputSize*k;
float *gradInput_k = gradInput + inputSize*k;
float centroid = centroids[k];
// get gradient of centroid
buffer[tx] = 0;
for (unsigned int i=tx; i<outputWindowSize; i+=blockDim.x)
{
buffer[tx] += gradOutput_k[i]*output_k[i]*((float)(i+1) - centroid);
}
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
{
int outputIdx = outputIndice[k];
float gradCentroid = buffer[0]*c;
centroid -= (lr*gradCentroid);
centroid += outputIdx-1;
centroid /= (float)(outputSize);
targetCentroids[k] = centroid;
buffer[WINDOWGATE_THREADS] = centroid*(float)(inputSize);
}
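// After the reduction, buffer[0] = sum_i gradOutput[i]*output[i]*((i+1) - centroid);
// scaled by c it is treated as the gradient w.r.t. the window centroid. Thread 0 has
// applied a learning-rate step, mapped the centroid back to the normalized range, and
// broadcast the corresponding input-space target centroid through shared memory.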
__syncthreads();
float targetCentroid = buffer[WINDOWGATE_THREADS];
buffer[tx] = 0;
// target is a gaussian blur
for (int i=tx; i<inputSize; i+=blockDim.x)
{
float target = (float)(i+1)-targetCentroid;
target = d*expf(target*target*e);
float input = input_k[i];
// dot product of logProbInput and probTarget (NLL)
buffer[tx] -= logf(input + 0.0000001)*target;
// grad input w.r.t. NLL
gradInput_k[i] = -target/(input + 0.0000001);
}
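// The reduction below therefore accumulates the cross-entropy (NLL)
//   error[k] = -sum_i target_i * log(input_i + eps)
// between the Gaussian target window and the input distribution, while
// gradInput holds its derivative -target_i / (input_i + eps).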
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
error[k] = buffer[tx];
}
static int cunnx_WindowGate_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
int inputSize = luaT_getfieldcheckint(L, 1, "inputSize");
int outputSize = luaT_getfieldcheckint(L, 1, "outputSize");
int outputWindowSize = luaT_getfieldcheckint(L, 1, "outputWindowSize");
int batchSize = luaT_getfieldcheckint(L, 1, "batchSize");
float c = (float)luaT_getfieldchecknumber(L, 1, "c");
float d = (float)luaT_getfieldchecknumber(L, 1, "d");
float e = (float)luaT_getfieldchecknumber(L, 1, "e");
float lr = (float)luaT_getfieldchecknumber(L, 1, "lr");
THCudaTensor *error = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "error", "torch.CudaTensor");
THCudaTensor *centroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "centroid", "torch.CudaTensor");
THCudaTensor *targetCentroid = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "targetCentroid", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_output", "torch.CudaTensor");
THCudaTensor *outputIndiceCuda = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "outputIndiceCuda", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, input->size[1] == inputSize, 2, "invalid input size");
THCudaTensor_resize2d(state, gradInput, batchSize, inputSize);
THCudaTensor_resize1d(state, error, batchSize);
THCudaTensor_resize1d(state, targetCentroid, batchSize);
/* call cuda kernel */
dim3 blocks(batchSize); // each cuda-block is an example
dim3 threads(WINDOWGATE_THREADS);
cunnx_WindowGate_updateGradInput_kernel<<<blocks,threads>>>(
THCudaTensor_data(state, gradInput), THCudaTensor_data(state, error),
THCudaTensor_data(state, targetCentroid),
(const float*)THCudaTensor_data(state, centroid),
(const float*)THCudaTensor_data(state, input),
(const float*)THCudaTensor_data(state, outputIndiceCuda),
(const float*)THCudaTensor_data(state, output),
(const float*)THCudaTensor_data(state, gradOutput),
inputSize, outputSize, outputWindowSize, c, d, e, lr
);
return 1;
}
static const struct luaL_Reg cunnx_WindowGate__ [] = {
{"WindowGate_updateOutput", cunnx_WindowGate_updateOutput},
{"WindowGate_updateGradInput", cunnx_WindowGate_updateGradInput},
{NULL, NULL}
};
static void cunnx_WindowGate_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunnx_WindowGate__, "nn");
lua_pop(L,1);
}
|
fd8399fa7312bd90f1747629fa7f1324348822b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.hpp"
#include <math.h>
/******************************************************************************
GPU main computation kernels
*******************************************************************************/
__global__ void gpu_normal_kernel(float *in_val, float *in_pos, float *out,
int grid_size, int num_in) {
//@@ INSERT CODE HERE
int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
float in_val2 = 0;
float tmp = 0;
float dist = 0;
if (outIdx < grid_size){
for(int inIdx = 0; inIdx < num_in; ++inIdx){
dist = in_pos[inIdx] - (float)outIdx;
in_val2 = in_val[inIdx] * in_val[inIdx];
tmp += in_val2 / (dist*dist);
}
out[outIdx] = tmp;
}
}
__global__ void gpu_cutoff_kernel(float *in_val, float *in_pos, float *out,
int grid_size, int num_in,
float cutoff2) {
//@@ INSERT CODE HERE
int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
float in_val2 = 0;
float tmp = 0;
float dist = 0;
if (outIdx < grid_size){
for(int inIdx = 0; inIdx < num_in; ++inIdx){
dist = in_pos[inIdx] - (float)outIdx;
if((dist * dist) < cutoff2){
in_val2 = in_val[inIdx] * in_val[inIdx];
tmp += in_val2 / (dist*dist);
}
}
out[outIdx] = tmp;
}
}
__global__ void gpu_cutoff_binned_kernel(int *bin_ptrs,
float *in_val_sorted,
float *in_pos_sorted, float *out,
int grid_size, float cutoff2) {
//@@ INSERT CODE HERE
//NUM_BINS(1024) / grid_size = how many bins in one grid
int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
int binIdx = (outIdx / grid_size) * NUM_BINS ;
float cutoff = sqrt(float(cutoff2));
int start = (binIdx - int(cutoff)) >= 0 ? bin_ptrs[binIdx - int(cutoff)] : 0;
int end = binIdx + int(cutoff) >= NUM_BINS ? bin_ptrs[NUM_BINS] : bin_ptrs[binIdx + int(cutoff)];
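// Note: outIdx / grid_size is integer division, so binIdx is 0 for every in-range
// output (outIdx < grid_size). With the cutoff used by this lab's harness
// (3000, larger than NUM_BINS) the [start, end) window then spans all sorted
// inputs, and the dist*dist < cutoff2 test below is what enforces the cutoff.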
if(outIdx < grid_size){
float tmp = 0.0;
for(int inIdx = start; inIdx < end; ++inIdx){
const float in_val2 = in_val_sorted[inIdx] * in_val_sorted[inIdx];
const float dist = in_pos_sorted[inIdx] - (float)outIdx;
if(dist*dist < cutoff2){
tmp += in_val2 / (dist * dist);
}
}
out[outIdx] = tmp;
}
}
/******************************************************************************
Main computation functions
*******************************************************************************/
void cpu_normal(float *in_val, float *in_pos, float *out, int grid_size,
int num_in) {
for (int inIdx = 0; inIdx < num_in; ++inIdx) {
const float in_val2 = in_val[inIdx] * in_val[inIdx];
for (int outIdx = 0; outIdx < grid_size; ++outIdx) {
const float dist = in_pos[inIdx] - (float)outIdx;
out[outIdx] += in_val2 / (dist * dist);
}
}
}
void gpu_normal(float *in_val, float *in_pos, float *out, int grid_size,
int num_in) {
const int numThreadsPerBlock = 512;
const int numBlocks = (grid_size - 1) / numThreadsPerBlock + 1;
hipLaunchKernelGGL(( gpu_normal_kernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, in_val, in_pos, out,
grid_size, num_in);
}
void gpu_cutoff(float *in_val, float *in_pos, float *out, int grid_size,
int num_in, float cutoff2) {
const int numThreadsPerBlock = 512;
const int numBlocks = (grid_size - 1) / numThreadsPerBlock + 1;
hipLaunchKernelGGL(( gpu_cutoff_kernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0,
in_val, in_pos, out, grid_size, num_in, cutoff2);
}
void gpu_cutoff_binned(int *bin_ptrs, float *in_val_sorted,
float *in_pos_sorted, float *out, int grid_size,
float cutoff2) {
const int numThreadsPerBlock = 512;
const int numBlocks = (grid_size - 1) / numThreadsPerBlock + 1;
hipLaunchKernelGGL(( gpu_cutoff_binned_kernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0,
bin_ptrs, in_val_sorted, in_pos_sorted, out, grid_size, cutoff2);
}
/******************************************************************************
Preprocessing kernels
*******************************************************************************/
__global__ void histogram(float *in_pos, int *bin_counts, int num_in,
int grid_size) {
//@@ INSERT CODE HERE
int binIdx = 0;
int inIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (inIdx < num_in) {
binIdx = (int) ((in_pos[inIdx] / grid_size) * NUM_BINS);
atomicAdd(&bin_counts[binIdx], 1);
}
}
__global__ void scan(int *bin_counts, int *bin_ptrs) {
//@@ INSERT CODE HERE
__shared__ int scan[NUM_BINS];
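// Work-efficient (Brent-Kung style) scan over the NUM_BINS histogram: each of the
// 512 threads loads two bins, an up-sweep builds partial sums in place, a down-sweep
// distributes them, and the shifted write-out at the end yields exclusive bin
// pointers (bin_ptrs[b] = number of inputs falling in bins < b).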
int tx = threadIdx.x;
scan[2 * tx + 1] = bin_counts[2 * tx +1];
scan[2 * tx] = bin_counts[2 * tx];
int stride = 1;
while(stride < NUM_BINS) {
__syncthreads();
int ind = (tx + 1) * stride * 2 -1;
if(ind < NUM_BINS && ind >= stride){
scan[ind] += scan[ind - stride];
}
stride = stride * 2;
}
int stride2 = NUM_BINS / 4;
while (stride2 > 0) {
__syncthreads();
int index = (tx + 1) * stride2 * 2 - 1;
if ((index + stride2) < NUM_BINS)
scan[index + stride2] += scan[index];
stride2 = stride2 / 2;
}
__syncthreads();
bin_ptrs[2 * tx + 1] = scan[2 * tx];
bin_ptrs[2 * tx + 2] = scan[2 * tx + 1];
if (tx == 0)
bin_ptrs[tx] = 0;
}
__global__ void sort(float *in_val, float *in_pos, float *in_val_sorted,
float *in_pos_sorted, int grid_size, int num_in,
int *bin_counts, int *bin_ptrs) {
//@@ INSERT CODE HERE
int inIdx = blockDim.x * blockIdx.x + threadIdx.x;
int binIdx = 0;
int newIdx = 0;
if (inIdx < num_in) {
binIdx = (int) ((in_pos[inIdx] / grid_size) * NUM_BINS);
newIdx = bin_ptrs[binIdx + 1] - atomicSub(&bin_counts[binIdx], 1);
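// atomicSub returns the pre-decrement count, so each thread claims a unique slot:
// the first thread to decrement writes to bin_ptrs[binIdx] (the start of the bin)
// and later threads fill toward bin_ptrs[binIdx + 1] - 1.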
in_val_sorted[newIdx] = in_val[inIdx];
in_pos_sorted[newIdx] = in_pos[inIdx];
}
}
/******************************************************************************
Preprocessing functions
*******************************************************************************/
static void cpu_preprocess(float *in_val, float *in_pos,
float *in_val_sorted, float *in_pos_sorted,
int grid_size, int num_in, int *bin_counts,
int *bin_ptrs) {
// Histogram the input positions
for (int binIdx = 0; binIdx < NUM_BINS; ++binIdx) {
bin_counts[binIdx] = 0;
}
for (int inIdx = 0; inIdx < num_in; ++inIdx) {
const int binIdx = (int)((in_pos[inIdx] / grid_size) * NUM_BINS);
++bin_counts[binIdx];
}
// Scan the histogram to get the bin pointers
bin_ptrs[0] = 0;
for (int binIdx = 0; binIdx < NUM_BINS; ++binIdx) {
bin_ptrs[binIdx + 1] = bin_ptrs[binIdx] + bin_counts[binIdx];
}
// Sort the inputs into the bins
for (int inIdx = 0; inIdx < num_in; ++inIdx) {
const int binIdx = (int)((in_pos[inIdx] / grid_size) * NUM_BINS);
const int newIdx = bin_ptrs[binIdx + 1] - bin_counts[binIdx];
--bin_counts[binIdx];
in_val_sorted[newIdx] = in_val[inIdx];
in_pos_sorted[newIdx] = in_pos[inIdx];
}
}
static void gpu_preprocess(float *in_val, float *in_pos,
float *in_val_sorted, float *in_pos_sorted,
int grid_size, int num_in, int *bin_counts,
int *bin_ptrs) {
const int numThreadsPerBlock = 512;
// Histogram the input positions
hipLaunchKernelGGL(( histogram), dim3(30), dim3(numThreadsPerBlock), 0, 0, in_pos, bin_counts, num_in,
grid_size);
// Scan the histogram to get the bin pointers
if (NUM_BINS != 1024) {
FAIL("NUM_BINS must be 1024. Do not change.");
return;
}
hipLaunchKernelGGL(( scan), dim3(1), dim3(numThreadsPerBlock), 0, 0, bin_counts, bin_ptrs);
// Sort the inputs into the bins
hipLaunchKernelGGL(( sort), dim3(30), dim3(numThreadsPerBlock), 0, 0, in_val, in_pos, in_val_sorted,
in_pos_sorted, grid_size, num_in,
bin_counts, bin_ptrs);
}
template <Mode mode>
int eval(const int num_in, const int max, const int grid_size) {
const std::string mode_info = mode_name(mode);
const std::string conf_info =
std::string("[len:") + std::to_string(num_in) + "/max:" + std::to_string(max) + "/gridSize:" + std::to_string(grid_size) + "]";
// Initialize host variables
// ----------------------------------------------
// Variables
std::vector<float> in_val_h;
std::vector<float> in_pos_h;
float *in_val_d = nullptr;
float *in_pos_d = nullptr;
float *out_d = nullptr;
// Constants
const float cutoff = 3000.0f; // Cutoff distance for optimized computation
const float cutoff2 = cutoff * cutoff;
// Extras needed for input binning
std::vector<int> bin_counts_h;
std::vector<int> bin_ptrs_h;
std::vector<float> in_val_sorted_h;
std::vector<float> in_pos_sorted_h;
int *bin_counts_d = nullptr;
int *bin_ptrs_d = nullptr;
float *in_val_sorted_d = nullptr;
float *in_pos_sorted_d = nullptr;
in_val_h = generate_input(num_in, max);
in_pos_h = generate_input(num_in, grid_size);
std::vector<float> out_h(grid_size);
std::fill_n(out_h.begin(), grid_size, 0.0f);
INFO("Running " << mode_info << conf_info);
// CPU Preprocessing
// ------------------------------------------------------
if (mode == Mode::GPUBinnedCPUPreprocessing) {
timer_start("Allocating data for preprocessing");
// Data structures needed to preprocess the bins on the CPU
bin_counts_h.reserve(NUM_BINS);
bin_ptrs_h.reserve(NUM_BINS + 1);
in_val_sorted_h.reserve(num_in);
in_pos_sorted_h.reserve(num_in);
cpu_preprocess(in_val_h.data(), in_pos_h.data(), in_val_sorted_h.data(), in_pos_sorted_h.data(), grid_size, num_in, bin_counts_h.data(),
bin_ptrs_h.data());
timer_stop();
}
// Allocate device variables
// ----------------------------------------------
if (mode != Mode::CPUNormal) {
timer_start("Allocating data");
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if (mode != Mode::GPUBinnedCPUPreprocessing) {
THROW_IF_ERROR(hipMalloc((void **) &in_val_d, num_in * sizeof(float)));
THROW_IF_ERROR(hipMalloc((void **) &in_pos_d, num_in * sizeof(float)));
}
// All modes need the output array
THROW_IF_ERROR(hipMalloc((void **) &out_d, grid_size * sizeof(float)));
// Only binning modes need binning information
if (mode == Mode::GPUBinnedCPUPreprocessing || mode == Mode::GPUBinnedGPUPreprocessing) {
THROW_IF_ERROR(hipMalloc((void **) &in_val_sorted_d, num_in * sizeof(float)));
THROW_IF_ERROR(hipMalloc((void **) &in_pos_sorted_d, num_in * sizeof(float)));
THROW_IF_ERROR(hipMalloc((void **) &bin_ptrs_d, (NUM_BINS + 1) * sizeof(int)));
if (mode == Mode::GPUBinnedGPUPreprocessing) {
// Only used in preprocessing but not the actual computation
THROW_IF_ERROR(hipMalloc((void **) &bin_counts_d, NUM_BINS * sizeof(int)));
}
}
hipDeviceSynchronize();
timer_stop();
}
// Copy host variables to device
// ------------------------------------------
if (mode != Mode::CPUNormal) {
timer_start("Copying data");
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if (mode != Mode::GPUBinnedCPUPreprocessing) {
THROW_IF_ERROR(hipMemcpy(in_val_d, in_val_h.data(), num_in * sizeof(float), hipMemcpyHostToDevice));
THROW_IF_ERROR(hipMemcpy(in_pos_d, in_pos_h.data(), num_in * sizeof(float), hipMemcpyHostToDevice));
}
// All modes need the output array
THROW_IF_ERROR(hipMemset(out_d, 0, grid_size * sizeof(float)));
if (mode == Mode::GPUBinnedCPUPreprocessing) {
THROW_IF_ERROR(hipMemcpy(in_val_sorted_d, in_val_sorted_h.data(), num_in * sizeof(float), hipMemcpyHostToDevice));
THROW_IF_ERROR(hipMemcpy(in_pos_sorted_d, in_pos_sorted_h.data(), num_in * sizeof(float), hipMemcpyHostToDevice));
THROW_IF_ERROR(hipMemcpy(bin_ptrs_d, bin_ptrs_h.data(), (NUM_BINS + 1) * sizeof(int), hipMemcpyHostToDevice));
} else if (mode == Mode::GPUBinnedGPUPreprocessing) {
// If preprocessing on the GPU, bin counts need to be initialized
// and nothing needs to be copied
THROW_IF_ERROR(hipMemset(bin_counts_d, 0, NUM_BINS * sizeof(int)));
}
THROW_IF_ERROR(hipDeviceSynchronize());
timer_stop();
}
// GPU Preprocessing
// ------------------------------------------------------
if (mode == Mode::GPUBinnedGPUPreprocessing) {
timer_start("Preprocessing data on the GPU...");
gpu_preprocess(in_val_d, in_pos_d, in_val_sorted_d, in_pos_sorted_d, grid_size, num_in, bin_counts_d, bin_ptrs_d);
THROW_IF_ERROR(hipDeviceSynchronize());
timer_stop();
}
// Launch kernel
// ----------------------------------------------------------
timer_start(std::string("Performing ") + mode_info + conf_info + std::string(" computation"));
switch (mode) {
case Mode::CPUNormal:
cpu_normal(in_val_h.data(), in_pos_h.data(), out_h.data(), grid_size, num_in);
break;
case Mode::GPUNormal:
gpu_normal(in_val_d, in_pos_d, out_d, grid_size, num_in);
break;
case Mode::GPUCutoff:
gpu_cutoff(in_val_d, in_pos_d, out_d, grid_size, num_in, cutoff2);
break;
case Mode::GPUBinnedCPUPreprocessing:
case Mode::GPUBinnedGPUPreprocessing:
gpu_cutoff_binned(bin_ptrs_d, in_val_sorted_d, in_pos_sorted_d, out_d, grid_size, cutoff2);
break;
default:
FAIL("Invalid mode " << (int) mode);
}
THROW_IF_ERROR(hipDeviceSynchronize());
timer_stop();
// Copy device variables from host
// ----------------------------------------
if (mode != Mode::CPUNormal) {
THROW_IF_ERROR(hipMemcpy(out_h.data(), out_d, grid_size * sizeof(float), hipMemcpyDeviceToHost));
THROW_IF_ERROR(hipDeviceSynchronize());
}
// Verify correctness
// -----------------------------------------------------
const auto actual_output = compute_output(in_val_h, in_pos_h, num_in, grid_size);
verify(actual_output, out_h);
// Free memory
// ------------------------------------------------------------
if (mode != Mode::CPUNormal) {
if (mode != Mode::GPUBinnedCPUPreprocessing) {
hipFree(in_val_d);
hipFree(in_pos_d);
}
hipFree(out_d);
if (mode == Mode::GPUBinnedCPUPreprocessing || mode == Mode::GPUBinnedGPUPreprocessing) {
hipFree(in_val_sorted_d);
hipFree(in_pos_sorted_d);
hipFree(bin_ptrs_d);
if (mode == Mode::GPUBinnedGPUPreprocessing) {
hipFree(bin_counts_d);
}
}
}
std::cout << "----------------------------------------\n";
return 0;
}
TEST_CASE("CPUNormal", "[cpu_normal]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::CPUNormal>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::CPUNormal>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::CPUNormal>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::CPUNormal>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::CPUNormal>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::CPUNormal>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::CPUNormal>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::CPUNormal>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::CPUNormal>(696, 1, 232);
}
}
TEST_CASE("GPUNormal", "[gpu_normal]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUNormal>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUNormal>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUNormal>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUNormal>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUNormal>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUNormal>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUNormal>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUNormal>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUNormal>(696, 1, 232);
}
}
TEST_CASE("GPUCutoff", "[gpu_cutoff]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUCutoff>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUCutoff>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUCutoff>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUCutoff>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUCutoff>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUCutoff>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUCutoff>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUCutoff>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUCutoff>(696, 1, 232);
}
}
TEST_CASE("GPUBinnedCPUPreprocessing", "[gpu_binned_cpu_preprocessing]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUBinnedCPUPreprocessing>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUBinnedCPUPreprocessing>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUBinnedCPUPreprocessing>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUBinnedCPUPreprocessing>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUBinnedCPUPreprocessing>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUBinnedCPUPreprocessing>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUBinnedCPUPreprocessing>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUBinnedCPUPreprocessing>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUBinnedCPUPreprocessing>(696, 1, 232);
}
}
TEST_CASE("GPUBinnedGPUPreprocessing", "[gpu_binned_gpu_preprocessing]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUBinnedGPUPreprocessing>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUBinnedGPUPreprocessing>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUBinnedGPUPreprocessing>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUBinnedGPUPreprocessing>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUBinnedGPUPreprocessing>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUBinnedGPUPreprocessing>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUBinnedGPUPreprocessing>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUBinnedGPUPreprocessing>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUBinnedGPUPreprocessing>(696, 1, 232);
}
}
| fd8399fa7312bd90f1747629fa7f1324348822b6.cu | #include "helper.hpp"
#include <math.h>
/******************************************************************************
GPU main computation kernels
*******************************************************************************/
__global__ void gpu_normal_kernel(float *in_val, float *in_pos, float *out,
int grid_size, int num_in) {
//@@ INSERT CODE HERE
int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
float in_val2 = 0;
float tmp = 0;
float dist = 0;
if (outIdx < grid_size){
for(int inIdx = 0; inIdx < num_in; ++inIdx){
dist = in_pos[inIdx] - (float)outIdx;
in_val2 = in_val[inIdx] * in_val[inIdx];
tmp += in_val2 / (dist*dist);
}
out[outIdx] = tmp;
}
}
__global__ void gpu_cutoff_kernel(float *in_val, float *in_pos, float *out,
int grid_size, int num_in,
float cutoff2) {
//@@ INSERT CODE HERE
int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
float in_val2 = 0;
float tmp = 0;
float dist = 0;
if (outIdx < grid_size){
for(int inIdx = 0; inIdx < num_in; ++inIdx){
dist = in_pos[inIdx] - (float)outIdx;
if((dist * dist) < cutoff2){
in_val2 = in_val[inIdx] * in_val[inIdx];
tmp += in_val2 / (dist*dist);
}
}
out[outIdx] = tmp;
}
}
__global__ void gpu_cutoff_binned_kernel(int *bin_ptrs,
float *in_val_sorted,
float *in_pos_sorted, float *out,
int grid_size, float cutoff2) {
//@@ INSERT CODE HERE
//NUM_BINS(1024) / grid_size = how many bins in one grid
int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
int binIdx = (outIdx / grid_size) * NUM_BINS ;
float cutoff = sqrt(float(cutoff2));
int start = (binIdx - int(cutoff)) >= 0 ? bin_ptrs[binIdx - int(cutoff)] : 0;
int end = binIdx + int(cutoff) >= NUM_BINS ? bin_ptrs[NUM_BINS] : bin_ptrs[binIdx + int(cutoff)];
if(outIdx < grid_size){
float tmp = 0.0;
for(int inIdx = start; inIdx < end; ++inIdx){
const float in_val2 = in_val_sorted[inIdx] * in_val_sorted[inIdx];
const float dist = in_pos_sorted[inIdx] - (float)outIdx;
if(dist*dist < cutoff2){
tmp += in_val2 / (dist * dist);
}
}
out[outIdx] = tmp;
}
}
/******************************************************************************
Main computation functions
*******************************************************************************/
void cpu_normal(float *in_val, float *in_pos, float *out, int grid_size,
int num_in) {
for (int inIdx = 0; inIdx < num_in; ++inIdx) {
const float in_val2 = in_val[inIdx] * in_val[inIdx];
for (int outIdx = 0; outIdx < grid_size; ++outIdx) {
const float dist = in_pos[inIdx] - (float)outIdx;
out[outIdx] += in_val2 / (dist * dist);
}
}
}
void gpu_normal(float *in_val, float *in_pos, float *out, int grid_size,
int num_in) {
const int numThreadsPerBlock = 512;
const int numBlocks = (grid_size - 1) / numThreadsPerBlock + 1;
gpu_normal_kernel<<<numBlocks, numThreadsPerBlock>>>(in_val, in_pos, out,
grid_size, num_in);
}
void gpu_cutoff(float *in_val, float *in_pos, float *out, int grid_size,
int num_in, float cutoff2) {
const int numThreadsPerBlock = 512;
const int numBlocks = (grid_size - 1) / numThreadsPerBlock + 1;
gpu_cutoff_kernel<<<numBlocks, numThreadsPerBlock>>>(
in_val, in_pos, out, grid_size, num_in, cutoff2);
}
void gpu_cutoff_binned(int *bin_ptrs, float *in_val_sorted,
float *in_pos_sorted, float *out, int grid_size,
float cutoff2) {
const int numThreadsPerBlock = 512;
const int numBlocks = (grid_size - 1) / numThreadsPerBlock + 1;
gpu_cutoff_binned_kernel<<<numBlocks, numThreadsPerBlock>>>(
bin_ptrs, in_val_sorted, in_pos_sorted, out, grid_size, cutoff2);
}
/******************************************************************************
Preprocessing kernels
*******************************************************************************/
__global__ void histogram(float *in_pos, int *bin_counts, int num_in,
int grid_size) {
//@@ INSERT CODE HERE
int binIdx = 0;
int inIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (inIdx < num_in) {
binIdx = (int) ((in_pos[inIdx] / grid_size) * NUM_BINS);
atomicAdd(&bin_counts[binIdx], 1);
}
}
__global__ void scan(int *bin_counts, int *bin_ptrs) {
//@@ INSERT CODE HERE
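  // a work-efficient (Brent-Kung style) scan: an up-sweep reduction followed by a
  // down-sweep pass over NUM_BINS values in shared memory; the inclusive result is
  // then written out shifted by one element to produce exclusive bin pointers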
__shared__ int scan[NUM_BINS];
int tx = threadIdx.x;
scan[2 * tx + 1] = bin_counts[2 * tx +1];
scan[2 * tx] = bin_counts[2 * tx];
int stride = 1;
while(stride < NUM_BINS) {
__syncthreads();
int ind = (tx + 1) * stride * 2 -1;
if(ind < NUM_BINS && ind >= stride){
scan[ind] += scan[ind - stride];
}
stride = stride * 2;
}
int stride2 = NUM_BINS / 4;
while (stride2 > 0) {
__syncthreads();
int index = (tx + 1) * stride2 * 2 - 1;
if ((index + stride2) < NUM_BINS)
scan[index + stride2] += scan[index];
stride2 = stride2 / 2;
}
__syncthreads();
bin_ptrs[2 * tx + 1] = scan[2 * tx];
bin_ptrs[2 * tx + 2] = scan[2 * tx + 1];
if (tx == 0)
bin_ptrs[tx] = 0;
}
__global__ void sort(float *in_val, float *in_pos, float *in_val_sorted,
float *in_pos_sorted, int grid_size, int num_in,
int *bin_counts, int *bin_ptrs) {
//@@ INSERT CODE HERE
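  // scatter each input into its bin: atomicSub returns the count before decrementing, so the
  // writes for a bin cover indices [bin_ptrs[binIdx], bin_ptrs[binIdx+1]) exactly once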
int inIdx = blockDim.x * blockIdx.x + threadIdx.x;
int binIdx = 0;
int newIdx = 0;
if (inIdx < num_in) {
binIdx = (int) ((in_pos[inIdx] / grid_size) * NUM_BINS);
newIdx = bin_ptrs[binIdx + 1] - atomicSub(&bin_counts[binIdx], 1);
in_val_sorted[newIdx] = in_val[inIdx];
in_pos_sorted[newIdx] = in_pos[inIdx];
}
}
/******************************************************************************
Preprocessing functions
*******************************************************************************/
static void cpu_preprocess(float *in_val, float *in_pos,
float *in_val_sorted, float *in_pos_sorted,
int grid_size, int num_in, int *bin_counts,
int *bin_ptrs) {
// Histogram the input positions
for (int binIdx = 0; binIdx < NUM_BINS; ++binIdx) {
bin_counts[binIdx] = 0;
}
for (int inIdx = 0; inIdx < num_in; ++inIdx) {
const int binIdx = (int)((in_pos[inIdx] / grid_size) * NUM_BINS);
++bin_counts[binIdx];
}
// Scan the histogram to get the bin pointers
bin_ptrs[0] = 0;
for (int binIdx = 0; binIdx < NUM_BINS; ++binIdx) {
bin_ptrs[binIdx + 1] = bin_ptrs[binIdx] + bin_counts[binIdx];
}
// Sort the inputs into the bins
for (int inIdx = 0; inIdx < num_in; ++inIdx) {
const int binIdx = (int)((in_pos[inIdx] / grid_size) * NUM_BINS);
const int newIdx = bin_ptrs[binIdx + 1] - bin_counts[binIdx];
--bin_counts[binIdx];
in_val_sorted[newIdx] = in_val[inIdx];
in_pos_sorted[newIdx] = in_pos[inIdx];
}
}
static void gpu_preprocess(float *in_val, float *in_pos,
float *in_val_sorted, float *in_pos_sorted,
int grid_size, int num_in, int *bin_counts,
int *bin_ptrs) {
const int numThreadsPerBlock = 512;
// Histogram the input positions
histogram<<<30, numThreadsPerBlock>>>(in_pos, bin_counts, num_in,
grid_size);
// Scan the histogram to get the bin pointers
if (NUM_BINS != 1024) {
FAIL("NUM_BINS must be 1024. Do not change.");
return;
}
scan<<<1, numThreadsPerBlock>>>(bin_counts, bin_ptrs);
// Sort the inputs into the bins
sort<<<30, numThreadsPerBlock>>>(in_val, in_pos, in_val_sorted,
in_pos_sorted, grid_size, num_in,
bin_counts, bin_ptrs);
}
template <Mode mode>
int eval(const int num_in, const int max, const int grid_size) {
const std::string mode_info = mode_name(mode);
const std::string conf_info =
std::string("[len:") + std::to_string(num_in) + "/max:" + std::to_string(max) + "/gridSize:" + std::to_string(grid_size) + "]";
// Initialize host variables
// ----------------------------------------------
// Variables
std::vector<float> in_val_h;
std::vector<float> in_pos_h;
float *in_val_d = nullptr;
float *in_pos_d = nullptr;
float *out_d = nullptr;
// Constants
const float cutoff = 3000.0f; // Cutoff distance for optimized computation
const float cutoff2 = cutoff * cutoff;
// Extras needed for input binning
std::vector<int> bin_counts_h;
std::vector<int> bin_ptrs_h;
std::vector<float> in_val_sorted_h;
std::vector<float> in_pos_sorted_h;
int *bin_counts_d = nullptr;
int *bin_ptrs_d = nullptr;
float *in_val_sorted_d = nullptr;
float *in_pos_sorted_d = nullptr;
in_val_h = generate_input(num_in, max);
in_pos_h = generate_input(num_in, grid_size);
std::vector<float> out_h(grid_size);
std::fill_n(out_h.begin(), grid_size, 0.0f);
INFO("Running " << mode_info << conf_info);
// CPU Preprocessing
// ------------------------------------------------------
if (mode == Mode::GPUBinnedCPUPreprocessing) {
timer_start("Allocating data for preprocessing");
// Data structures needed to preprocess the bins on the CPU
    // use resize (not reserve) so writing through .data() in cpu_preprocess stays within the vectors' size
    bin_counts_h.resize(NUM_BINS);
    bin_ptrs_h.resize(NUM_BINS + 1);
    in_val_sorted_h.resize(num_in);
    in_pos_sorted_h.resize(num_in);
cpu_preprocess(in_val_h.data(), in_pos_h.data(), in_val_sorted_h.data(), in_pos_sorted_h.data(), grid_size, num_in, bin_counts_h.data(),
bin_ptrs_h.data());
timer_stop();
}
// Allocate device variables
// ----------------------------------------------
if (mode != Mode::CPUNormal) {
timer_start("Allocating data");
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if (mode != Mode::GPUBinnedCPUPreprocessing) {
THROW_IF_ERROR(cudaMalloc((void **) &in_val_d, num_in * sizeof(float)));
THROW_IF_ERROR(cudaMalloc((void **) &in_pos_d, num_in * sizeof(float)));
}
// All modes need the output array
THROW_IF_ERROR(cudaMalloc((void **) &out_d, grid_size * sizeof(float)));
// Only binning modes need binning information
if (mode == Mode::GPUBinnedCPUPreprocessing || mode == Mode::GPUBinnedGPUPreprocessing) {
THROW_IF_ERROR(cudaMalloc((void **) &in_val_sorted_d, num_in * sizeof(float)));
THROW_IF_ERROR(cudaMalloc((void **) &in_pos_sorted_d, num_in * sizeof(float)));
THROW_IF_ERROR(cudaMalloc((void **) &bin_ptrs_d, (NUM_BINS + 1) * sizeof(int)));
if (mode == Mode::GPUBinnedGPUPreprocessing) {
// Only used in preprocessing but not the actual computation
THROW_IF_ERROR(cudaMalloc((void **) &bin_counts_d, NUM_BINS * sizeof(int)));
}
}
cudaDeviceSynchronize();
timer_stop();
}
// Copy host variables to device
// ------------------------------------------
if (mode != Mode::CPUNormal) {
timer_start("Copying data");
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if (mode != Mode::GPUBinnedCPUPreprocessing) {
THROW_IF_ERROR(cudaMemcpy(in_val_d, in_val_h.data(), num_in * sizeof(float), cudaMemcpyHostToDevice));
THROW_IF_ERROR(cudaMemcpy(in_pos_d, in_pos_h.data(), num_in * sizeof(float), cudaMemcpyHostToDevice));
}
// All modes need the output array
THROW_IF_ERROR(cudaMemset(out_d, 0, grid_size * sizeof(float)));
if (mode == Mode::GPUBinnedCPUPreprocessing) {
THROW_IF_ERROR(cudaMemcpy(in_val_sorted_d, in_val_sorted_h.data(), num_in * sizeof(float), cudaMemcpyHostToDevice));
THROW_IF_ERROR(cudaMemcpy(in_pos_sorted_d, in_pos_sorted_h.data(), num_in * sizeof(float), cudaMemcpyHostToDevice));
THROW_IF_ERROR(cudaMemcpy(bin_ptrs_d, bin_ptrs_h.data(), (NUM_BINS + 1) * sizeof(int), cudaMemcpyHostToDevice));
} else if (mode == Mode::GPUBinnedGPUPreprocessing) {
// If preprocessing on the GPU, bin counts need to be initialized
// and nothing needs to be copied
THROW_IF_ERROR(cudaMemset(bin_counts_d, 0, NUM_BINS * sizeof(int)));
}
THROW_IF_ERROR(cudaDeviceSynchronize());
timer_stop();
}
// GPU Preprocessing
// ------------------------------------------------------
if (mode == Mode::GPUBinnedGPUPreprocessing) {
timer_start("Preprocessing data on the GPU...");
gpu_preprocess(in_val_d, in_pos_d, in_val_sorted_d, in_pos_sorted_d, grid_size, num_in, bin_counts_d, bin_ptrs_d);
THROW_IF_ERROR(cudaDeviceSynchronize());
timer_stop();
}
// Launch kernel
// ----------------------------------------------------------
timer_start(std::string("Performing ") + mode_info + conf_info + std::string(" computation"));
switch (mode) {
case Mode::CPUNormal:
cpu_normal(in_val_h.data(), in_pos_h.data(), out_h.data(), grid_size, num_in);
break;
case Mode::GPUNormal:
gpu_normal(in_val_d, in_pos_d, out_d, grid_size, num_in);
break;
case Mode::GPUCutoff:
gpu_cutoff(in_val_d, in_pos_d, out_d, grid_size, num_in, cutoff2);
break;
case Mode::GPUBinnedCPUPreprocessing:
case Mode::GPUBinnedGPUPreprocessing:
gpu_cutoff_binned(bin_ptrs_d, in_val_sorted_d, in_pos_sorted_d, out_d, grid_size, cutoff2);
break;
default:
FAIL("Invalid mode " << (int) mode);
}
THROW_IF_ERROR(cudaDeviceSynchronize());
timer_stop();
// Copy device variables from host
// ----------------------------------------
if (mode != Mode::CPUNormal) {
THROW_IF_ERROR(cudaMemcpy(out_h.data(), out_d, grid_size * sizeof(float), cudaMemcpyDeviceToHost));
THROW_IF_ERROR(cudaDeviceSynchronize());
}
// Verify correctness
// -----------------------------------------------------
const auto actual_output = compute_output(in_val_h, in_pos_h, num_in, grid_size);
verify(actual_output, out_h);
// Free memory
// ------------------------------------------------------------
if (mode != Mode::CPUNormal) {
if (mode != Mode::GPUBinnedCPUPreprocessing) {
cudaFree(in_val_d);
cudaFree(in_pos_d);
}
cudaFree(out_d);
if (mode == Mode::GPUBinnedCPUPreprocessing || mode == Mode::GPUBinnedGPUPreprocessing) {
cudaFree(in_val_sorted_d);
cudaFree(in_pos_sorted_d);
cudaFree(bin_ptrs_d);
if (mode == Mode::GPUBinnedGPUPreprocessing) {
cudaFree(bin_counts_d);
}
}
}
std::cout << "----------------------------------------\n";
return 0;
}
TEST_CASE("CPUNormal", "[cpu_normal]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::CPUNormal>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::CPUNormal>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::CPUNormal>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::CPUNormal>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::CPUNormal>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::CPUNormal>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::CPUNormal>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::CPUNormal>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::CPUNormal>(696, 1, 232);
}
}
TEST_CASE("GPUNormal", "[gpu_normal]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUNormal>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUNormal>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUNormal>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUNormal>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUNormal>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUNormal>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUNormal>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUNormal>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUNormal>(696, 1, 232);
}
}
TEST_CASE("GPUCutoff", "[gpu_cutoff]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUCutoff>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUCutoff>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUCutoff>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUCutoff>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUCutoff>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUCutoff>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUCutoff>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUCutoff>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUCutoff>(696, 1, 232);
}
}
TEST_CASE("GPUBinnedCPUPreprocessing", "[gpu_binned_cpu_preprocessing]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUBinnedCPUPreprocessing>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUBinnedCPUPreprocessing>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUBinnedCPUPreprocessing>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUBinnedCPUPreprocessing>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUBinnedCPUPreprocessing>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUBinnedCPUPreprocessing>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUBinnedCPUPreprocessing>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUBinnedCPUPreprocessing>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUBinnedCPUPreprocessing>(696, 1, 232);
}
}
TEST_CASE("GPUBinnedGPUPreprocessing", "[gpu_binned_gpu_preprocessing]") {
SECTION("[len:60/max:1/gridSize:60]") {
eval<Mode::GPUBinnedGPUPreprocessing>(60, 1, 60);
}
SECTION("[len:600/max:1/gridSize:100]") {
eval<Mode::GPUBinnedGPUPreprocessing>(600, 1, 100);
}
SECTION("[len:603/max:1/gridSize:201]") {
eval<Mode::GPUBinnedGPUPreprocessing>(603, 1, 201);
}
SECTION("[len:409/max:1/gridSize:160]") {
eval<Mode::GPUBinnedGPUPreprocessing>(409, 1, 160);
}
SECTION("[len:419/max:1/gridSize:100]") {
eval<Mode::GPUBinnedGPUPreprocessing>(419, 1, 100);
}
SECTION("[len:8065/max:1/gridSize:201]") {
eval<Mode::GPUBinnedGPUPreprocessing>(8065, 1, 201);
}
SECTION("[len:1440/max:1/gridSize:443]") {
eval<Mode::GPUBinnedGPUPreprocessing>(1440, 1, 443);
}
SECTION("[len:400/max:1/gridSize:200]") {
eval<Mode::GPUBinnedGPUPreprocessing>(400, 1, 200);
}
SECTION("[len:696/max:1/gridSize:232]") {
eval<Mode::GPUBinnedGPUPreprocessing>(696, 1, 232);
}
}
|
4b577073d2b586d0633fa831d73a696b1cc9e378.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2011 Russel Steinbach, Jeffrey Blanchard, Bradley Gordon,
* and Toluwaloju Alabi
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* NOTE: The following algorithms depend upon a modified version of Merrill's
 * Radix Sort algorithm. Parts of some functions are identical to those in
 * the file thrust/detail/device/cuda/detail/stable_radix_sort_merrill.inl
*/
#include "RadixSelect/RadixSelect_api.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
//Include various thrust items that are used
#include "thrust/detail/util/align.h"
#include "inplaceRadixSelect.cu"
#define RADIX_CUTOFF 1 <<21
namespace RadixSelect
{
template<typename T>
void postProcess(uint *result ){}
template<>
void postProcess<float>(uint *result){
unsigned int mask = (result[0] & 0x80000000) ? 0x80000000 : 0xffffffff ;
result[0] ^= mask;
}
template<typename T>
void postProcess(unsigned long long *result){}
template<>
void postProcess<double>(unsigned long long *result){
const unsigned long long mask = (result[0] & 0x8000000000000000) ? 0x8000000000000000 : 0xffffffffffffffff;
result[0] ^= mask;
}
template<typename RandomAccessIterator, typename T>
void merrillSelect(RandomAccessIterator first,
RandomAccessIterator last,
uint k,uint pass,T *result, uint needToPreprocess, RandomAccessIterator temp_keys){
uint num_elements = last - first;
if (!thrust::detail::util::is_aligned(thrust::raw_pointer_cast(&*first), 2*sizeof(T)))
{
hipMemcpy(thrust::raw_pointer_cast(&*temp_keys),thrust::raw_pointer_cast(&*first), num_elements * sizeof(T), hipMemcpyDeviceToDevice);
merrillSelect(temp_keys,temp_keys + num_elements,k, pass,result,needToPreprocess,&*first);
return;
}
RadixSelect::RadixSortingEnactor<T> sorter(num_elements);
RadixSelect::RadixSortStorage<T> storage(k,num_elements,needToPreprocess);
// allocate temporary buffers
thrust::detail::raw_cuda_device_buffer<int> temp_spine(sorter.SpineElements());
thrust::detail::raw_cuda_device_buffer<bool> temp_from_alt(2);
//copy the pointers to storage
storage.d_keys = thrust::raw_pointer_cast(&*first);
storage.d_alt_keys = thrust::raw_pointer_cast(&*temp_keys);
storage.d_spine = thrust::raw_pointer_cast(&temp_spine[0]);
storage.d_from_alt_storage = thrust::raw_pointer_cast(&temp_from_alt[0]);
uint retval = sorter.EnactSort(storage,pass);
//num_elements is now the number of elements in the new list we are interested in
num_elements = storage.h_useful[4];
  //if there are still more passes to go, and there is more than one element that could be
  //the kth largest element, call merrillSelect again to look at the next four bits
if(pass < ((sizeof(T) * 2) -1) && num_elements > 1){
//if the elements of the list were not redistributed then pass the inputs to this
//pass to the next pass, except increment pass by one.
if(retval){
merrillSelect(first,last, k, pass + 1,result, needToPreprocess,temp_keys);
}
    //otherwise the new list we are interested in is in temp_keys after being scattered.
    //we calculate the new start and stop values by adding the new start index h_useful[2] to the
    //beginning of temp_keys; the new value of k that we are looking for is
    //in h_useful[7]. Additionally, since one pass has already been run, the list has been preprocessed, so we should not
    //preprocess again.
else{
merrillSelect(temp_keys + storage.h_useful[2], temp_keys + storage.h_useful[2] + num_elements, storage.h_useful[7], pass + 1,result,0,first);
}
}
//if we do not need to do another pass then we just copy the result back to the cpu, and call the postprocess function
else{
//if the size has not changed then we know the results will still be in the input, so grab the value from there
if(retval){
hipMemcpy(result,thrust::raw_pointer_cast(&*first), 1 * sizeof(T), hipMemcpyDeviceToHost);
if(! needToPreprocess){
postProcess<T>((uint*)result);
}
return;
}
//otherwise we grab the value from temp_keys since they have been scattered there.
else{
hipMemcpy(result,thrust::raw_pointer_cast(&*temp_keys)+storage.h_useful[2], 1 * sizeof(T), hipMemcpyDeviceToHost);
postProcess<T>((uint *)result);
return;
}
}
}
template<typename RandomAccessIterator>
void merrillSelect(RandomAccessIterator first,
RandomAccessIterator last,
uint k,uint pass,double *result, uint needToPreprocess,RandomAccessIterator temp_keys){
uint num_elements = last - first;
typedef typename thrust::iterator_value<RandomAccessIterator>::type K;
if (!thrust::detail::util::is_aligned(thrust::raw_pointer_cast(&*first), 2*sizeof(K)))
{
hipMemcpy(thrust::raw_pointer_cast(&*temp_keys),thrust::raw_pointer_cast(&*first), num_elements * sizeof(double), hipMemcpyDeviceToDevice);
merrillSelect(temp_keys,temp_keys + num_elements,k, pass,result,needToPreprocess,&*first);
return;
}
RadixSelect::RadixSortingEnactor<K> sorter(num_elements);
RadixSelect::RadixSortStorage<K> storage(k,num_elements,needToPreprocess);
// allocate temporary buffers
thrust::detail::raw_cuda_device_buffer<int> temp_spine(sorter.SpineElements());
thrust::detail::raw_cuda_device_buffer<bool> temp_from_alt(2);
//copy the pointers to storage
storage.d_keys = thrust::raw_pointer_cast(&*first);
storage.d_alt_keys = thrust::raw_pointer_cast(&*temp_keys);
storage.d_spine = thrust::raw_pointer_cast(&temp_spine[0]);
storage.d_from_alt_storage = thrust::raw_pointer_cast(&temp_from_alt[0]);
uint retval = sorter.EnactSort(storage,pass);
//num_elements is now the number of elements in the new list we are interested in
num_elements = storage.h_useful[4];
  //if there are still more passes to go, and there is more than one element that could be
  //the kth largest element, call merrillSelect again to look at the next four bits
if(pass < 15 && num_elements > 1){
//if the elements of the list were not redistributed then pass the inputs to this
//pass to the next pass, except increment pass by one.
if(retval){
merrillSelect(first,last, k, pass + 1,result, needToPreprocess, temp_keys);
}
    //otherwise the new list we are interested in is in temp_keys after being scattered.
    //we calculate the new start and stop values by adding the new start index h_useful[2] to the
    //beginning of temp_keys; the new value of k that we are looking for is
    //in h_useful[7]. Additionally, since one pass has already been run, the list has been preprocessed, so we should not
    //preprocess again.
else{
merrillSelect(temp_keys + storage.h_useful[2], temp_keys + storage.h_useful[2] + num_elements, storage.h_useful[7], pass + 1,result,0,first);
}
}
//if we do not need to do another pass then we just copy the result back to the cpu, and call the postprocess function
else{
//if the size has not changed then we know the results will still be in the input, so grab the value from there
if(retval){
hipMemcpy(result,thrust::raw_pointer_cast(&*first), 1 * sizeof(double), hipMemcpyDeviceToHost);
if(! needToPreprocess){
postProcess<double>((unsigned long long*)result);
}
return;
}
//otherwise we grab the value from temp_keys since they have been scattered there.
else{
hipMemcpy(result,thrust::raw_pointer_cast(&*temp_keys)+storage.h_useful[2], 1 * sizeof(double), hipMemcpyDeviceToHost);
postProcess<double>((unsigned long long *)result);
return;
}
}
}
uint RadixSelectWrapper(uint* d_vec,uint size, uint k){
uint result;
uint *temp_keys;
thrust::device_ptr<uint> dev_ptr(d_vec);
if(size < (1 << 21)){
result = InplaceRadix::inplaceRadixSelectWrapper(d_vec, size,k);
}
else{
hipMalloc(&temp_keys, size * sizeof(uint));
thrust::device_ptr<uint> dev_temp_ptr(temp_keys);
merrillSelect(dev_ptr, dev_ptr + size,k, 0, &result,1,dev_temp_ptr);
hipFree(temp_keys);
}
return result;
}
float RadixSelectWrapper(float* d_vec,uint size, uint k){
float result;
float *temp_keys;
if(size < (1 << 21)){
result = InplaceRadix::inplaceRadixSelectWrapper(d_vec, size,k);
}
else{
hipMalloc(&temp_keys, size * sizeof(float));
thrust::device_ptr<float> dev_ptr(d_vec);
thrust::device_ptr<float> dev_temp_ptr(temp_keys);
merrillSelect(dev_ptr, dev_ptr + size,k, 0, &result,1,dev_temp_ptr);
hipFree(temp_keys);
}
return result;
}
double RadixSelectWrapper(double* d_vec,uint size, uint k){
double result;
double *temp_keys;
if(size < (1 << 21)){
result = InplaceRadix::inplaceRadixSelectWrapper(d_vec, size,k);
}
else{
hipMalloc(&temp_keys, size * sizeof(double));
thrust::device_ptr<double> dev_ptr(d_vec);
thrust::device_ptr<double> dev_temp_ptr(temp_keys);
merrillSelect(dev_ptr, dev_ptr + size,k, 0, &result,1,dev_temp_ptr);
hipFree(temp_keys);
}
return result;
}
}
| 4b577073d2b586d0633fa831d73a696b1cc9e378.cu | /* Copyright 2011 Russel Steinbach, Jeffrey Blanchard, Bradley Gordon,
* and Toluwaloju Alabi
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* NOTE: The following algorithms depend upon a modified version of Merrill's
 * Radix Sort algorithm. Parts of some functions are identical to those in
 * the file thrust/detail/device/cuda/detail/stable_radix_sort_merrill.inl
*/
#include "RadixSelect/RadixSelect_api.h"
#include <cuda.h>
#include <curand.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
//Include various thrust items that are used
#include "thrust/detail/util/align.h"
#include "inplaceRadixSelect.cu"
#define RADIX_CUTOFF 1 <<21
namespace RadixSelect
{
template<typename T>
void postProcess(uint *result ){}
template<>
void postProcess<float>(uint *result){
unsigned int mask = (result[0] & 0x80000000) ? 0x80000000 : 0xffffffff ;
result[0] ^= mask;
}
template<typename T>
void postProcess(unsigned long long *result){}
template<>
void postProcess<double>(unsigned long long *result){
const unsigned long long mask = (result[0] & 0x8000000000000000) ? 0x8000000000000000 : 0xffffffffffffffff;
result[0] ^= mask;
}
template<typename RandomAccessIterator, typename T>
void merrillSelect(RandomAccessIterator first,
RandomAccessIterator last,
uint k,uint pass,T *result, uint needToPreprocess, RandomAccessIterator temp_keys){
uint num_elements = last - first;
if (!thrust::detail::util::is_aligned(thrust::raw_pointer_cast(&*first), 2*sizeof(T)))
{
cudaMemcpy(thrust::raw_pointer_cast(&*temp_keys),thrust::raw_pointer_cast(&*first), num_elements * sizeof(T), cudaMemcpyDeviceToDevice);
merrillSelect(temp_keys,temp_keys + num_elements,k, pass,result,needToPreprocess,&*first);
return;
}
RadixSelect::RadixSortingEnactor<T> sorter(num_elements);
RadixSelect::RadixSortStorage<T> storage(k,num_elements,needToPreprocess);
// allocate temporary buffers
thrust::detail::raw_cuda_device_buffer<int> temp_spine(sorter.SpineElements());
thrust::detail::raw_cuda_device_buffer<bool> temp_from_alt(2);
//copy the pointers to storage
storage.d_keys = thrust::raw_pointer_cast(&*first);
storage.d_alt_keys = thrust::raw_pointer_cast(&*temp_keys);
storage.d_spine = thrust::raw_pointer_cast(&temp_spine[0]);
storage.d_from_alt_storage = thrust::raw_pointer_cast(&temp_from_alt[0]);
uint retval = sorter.EnactSort(storage,pass);
//num_elements is now the number of elements in the new list we are interested in
num_elements = storage.h_useful[4];
  //if there are still more passes to go, and there is more than one element that could be
  //the kth largest element, call merrillSelect again to look at the next four bits
if(pass < ((sizeof(T) * 2) -1) && num_elements > 1){
//if the elements of the list were not redistributed then pass the inputs to this
//pass to the next pass, except increment pass by one.
if(retval){
merrillSelect(first,last, k, pass + 1,result, needToPreprocess,temp_keys);
}
    //otherwise the new list we are interested in is in temp_keys after being scattered.
    //we calculate the new start and stop values by adding the new start index h_useful[2] to the
    //beginning of temp_keys; the new value of k that we are looking for is
    //in h_useful[7]. Additionally, since one pass has already been run, the list has been preprocessed, so we should not
    //preprocess again.
else{
merrillSelect(temp_keys + storage.h_useful[2], temp_keys + storage.h_useful[2] + num_elements, storage.h_useful[7], pass + 1,result,0,first);
}
}
//if we do not need to do another pass then we just copy the result back to the cpu, and call the postprocess function
else{
//if the size has not changed then we know the results will still be in the input, so grab the value from there
if(retval){
cudaMemcpy(result,thrust::raw_pointer_cast(&*first), 1 * sizeof(T), cudaMemcpyDeviceToHost);
if(! needToPreprocess){
postProcess<T>((uint*)result);
}
return;
}
//otherwise we grab the value from temp_keys since they have been scattered there.
else{
cudaMemcpy(result,thrust::raw_pointer_cast(&*temp_keys)+storage.h_useful[2], 1 * sizeof(T), cudaMemcpyDeviceToHost);
postProcess<T>((uint *)result);
return;
}
}
}
template<typename RandomAccessIterator>
void merrillSelect(RandomAccessIterator first,
RandomAccessIterator last,
uint k,uint pass,double *result, uint needToPreprocess,RandomAccessIterator temp_keys){
uint num_elements = last - first;
typedef typename thrust::iterator_value<RandomAccessIterator>::type K;
if (!thrust::detail::util::is_aligned(thrust::raw_pointer_cast(&*first), 2*sizeof(K)))
{
cudaMemcpy(thrust::raw_pointer_cast(&*temp_keys),thrust::raw_pointer_cast(&*first), num_elements * sizeof(double), cudaMemcpyDeviceToDevice);
merrillSelect(temp_keys,temp_keys + num_elements,k, pass,result,needToPreprocess,&*first);
return;
}
RadixSelect::RadixSortingEnactor<K> sorter(num_elements);
RadixSelect::RadixSortStorage<K> storage(k,num_elements,needToPreprocess);
// allocate temporary buffers
thrust::detail::raw_cuda_device_buffer<int> temp_spine(sorter.SpineElements());
thrust::detail::raw_cuda_device_buffer<bool> temp_from_alt(2);
//copy the pointers to storage
storage.d_keys = thrust::raw_pointer_cast(&*first);
storage.d_alt_keys = thrust::raw_pointer_cast(&*temp_keys);
storage.d_spine = thrust::raw_pointer_cast(&temp_spine[0]);
storage.d_from_alt_storage = thrust::raw_pointer_cast(&temp_from_alt[0]);
uint retval = sorter.EnactSort(storage,pass);
//num_elements is now the number of elements in the new list we are interested in
num_elements = storage.h_useful[4];
  //if there are still more passes to go, and there is more than one element that could be
  //the kth largest element, call merrillSelect again to look at the next four bits
if(pass < 15 && num_elements > 1){
//if the elements of the list were not redistributed then pass the inputs to this
//pass to the next pass, except increment pass by one.
if(retval){
merrillSelect(first,last, k, pass + 1,result, needToPreprocess, temp_keys);
}
    //otherwise the new list we are interested in is in temp_keys after being scattered.
    //we calculate the new start and stop values by adding the new start index h_useful[2] to the
    //beginning of temp_keys; the new value of k that we are looking for is
    //in h_useful[7]. Additionally, since one pass has already been run, the list has been preprocessed, so we should not
    //preprocess again.
else{
merrillSelect(temp_keys + storage.h_useful[2], temp_keys + storage.h_useful[2] + num_elements, storage.h_useful[7], pass + 1,result,0,first);
}
}
//if we do not need to do another pass then we just copy the result back to the cpu, and call the postprocess function
else{
//if the size has not changed then we know the results will still be in the input, so grab the value from there
if(retval){
cudaMemcpy(result,thrust::raw_pointer_cast(&*first), 1 * sizeof(double), cudaMemcpyDeviceToHost);
if(! needToPreprocess){
postProcess<double>((unsigned long long*)result);
}
return;
}
//otherwise we grab the value from temp_keys since they have been scattered there.
else{
cudaMemcpy(result,thrust::raw_pointer_cast(&*temp_keys)+storage.h_useful[2], 1 * sizeof(double), cudaMemcpyDeviceToHost);
postProcess<double>((unsigned long long *)result);
return;
}
}
}
uint RadixSelectWrapper(uint* d_vec,uint size, uint k){
uint result;
uint *temp_keys;
thrust::device_ptr<uint> dev_ptr(d_vec);
if(size < (1 << 21)){
result = InplaceRadix::inplaceRadixSelectWrapper(d_vec, size,k);
}
else{
cudaMalloc(&temp_keys, size * sizeof(uint));
thrust::device_ptr<uint> dev_temp_ptr(temp_keys);
merrillSelect(dev_ptr, dev_ptr + size,k, 0, &result,1,dev_temp_ptr);
cudaFree(temp_keys);
}
return result;
}
float RadixSelectWrapper(float* d_vec,uint size, uint k){
float result;
float *temp_keys;
if(size < (1 << 21)){
result = InplaceRadix::inplaceRadixSelectWrapper(d_vec, size,k);
}
else{
cudaMalloc(&temp_keys, size * sizeof(float));
thrust::device_ptr<float> dev_ptr(d_vec);
thrust::device_ptr<float> dev_temp_ptr(temp_keys);
merrillSelect(dev_ptr, dev_ptr + size,k, 0, &result,1,dev_temp_ptr);
cudaFree(temp_keys);
}
return result;
}
double RadixSelectWrapper(double* d_vec,uint size, uint k){
double result;
double *temp_keys;
if(size < (1 << 21)){
result = InplaceRadix::inplaceRadixSelectWrapper(d_vec, size,k);
}
else{
cudaMalloc(&temp_keys, size * sizeof(double));
thrust::device_ptr<double> dev_ptr(d_vec);
thrust::device_ptr<double> dev_temp_ptr(temp_keys);
merrillSelect(dev_ptr, dev_ptr + size,k, 0, &result,1,dev_temp_ptr);
cudaFree(temp_keys);
}
return result;
}
}
|
f37e09a8652ec59977dd9e5cda71402e0d571fbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//#define DEPTH 2
// dp - cost aggregation array
// cost_image - m x n x D array
// d - use every d channels of input to conserve register memory
// m - image rows
// n - image columns
// D - depth
// depth_stride - pitch along depth dimension
// row_stride - pitch along row dimension
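// indexing note: dp and cost_image are indexed depth-major below; element (d, row, col)
// sits at d*(m*n) + row*n + col, hence the depth_dim_size*D_STEP strides in the loops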
__device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){
*d_zero = dp[ind];
if (d > 0)
*d_one = dp[ind - depth_dim_size] + P_one;
else
*d_one = 10000000;
if (d < D-1)
*d_two = dp[ind + depth_dim_size] + P_one;
else
*d_two = 10000000;
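    // with d_three = prev_min + P_two, this evaluates the SGM-style path-cost recurrence
    // min(L(d), L(d-1)+P1, L(d+1)+P1, prev_min+P2) - prev_min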
return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two;
}
__global__ void __diagonal_tr_bl_aggregate(float *dp, float *cost_image, int m, int n)
{
// which column of array to work on
// thread with blockIdx.x == 0 and threadIdx.x == 0 will start at column n-2 (and aggregate
// using data from columns n-1)
int start_col = (n - 2) - (blockDim.x * blockIdx.x) - threadIdx.x;
int depth_dim_size = m*n;
// todo: maybe it will work better to take running average of every d
// slices
while(start_col >= 0)
{
int col = start_col;
for (int row = 1; row < m; row++)
{
//int arr_ind = 0;
float prev_min = 100000000.0;
int ind = (row - 1) * n + col + 1;
// calculate min cost disparity for this column from row-1
//#pragma unroll
for (int depth = 0; depth < D; depth+=D_STEP){
prev_min = fminf(dp[ind], prev_min);
ind += (depth_dim_size * D_STEP);
}
float d0 = 0;
float d1 = 0;
float d2 = 0;
float d3 = prev_min + (float) P2;
ind = (row - 1) * n + col + 1;
int current_ind = row * n + col;
// todo: try having this loop go from 1 to d-1 and removing the if else
for (int d = 0; d < D; d+=D_STEP){
// for each d I need dp[{d-1, d, d+1}, row-1, col],
dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3);
ind += (depth_dim_size * D_STEP);
current_ind += (depth_dim_size * D_STEP);
}
col -= 1;
if (col < 0) // wrap each thread around once it gets to the last column
col = n-2;
}
start_col -= blockDim.x;
}
} | f37e09a8652ec59977dd9e5cda71402e0d571fbc.cu | #include "includes.h"
//#define DEPTH 2
// dp - cost aggregation array
// cost_image - m x n x D array
// d - use every d channels of input to conserve register memory
// m - image rows
// n - image columns
// D - depth
// depth_stride - pitch along depth dimension
// row_stride - pitch along row dimension
__device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){
*d_zero = dp[ind];
if (d > 0)
*d_one = dp[ind - depth_dim_size] + P_one;
else
*d_one = 10000000;
if (d < D-1)
*d_two = dp[ind + depth_dim_size] + P_one;
else
*d_two = 10000000;
return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two;
}
__global__ void __diagonal_tr_bl_aggregate(float *dp, float *cost_image, int m, int n)
{
// which column of array to work on
// thread with blockIdx.x == 0 and threadIdx.x == 0 will start at column n-2 (and aggregate
// using data from columns n-1)
int start_col = (n - 2) - (blockDim.x * blockIdx.x) - threadIdx.x;
int depth_dim_size = m*n;
// todo: maybe it will work better to take running average of every d
// slices
while(start_col >= 0)
{
int col = start_col;
for (int row = 1; row < m; row++)
{
//int arr_ind = 0;
float prev_min = 100000000.0;
int ind = (row - 1) * n + col + 1;
// calculate min cost disparity for this column from row-1
//#pragma unroll
for (int depth = 0; depth < D; depth+=D_STEP){
prev_min = fminf(dp[ind], prev_min);
ind += (depth_dim_size * D_STEP);
}
float d0 = 0;
float d1 = 0;
float d2 = 0;
float d3 = prev_min + (float) P2;
ind = (row - 1) * n + col + 1;
int current_ind = row * n + col;
// todo: try having this loop go from 1 to d-1 and removing the if else
for (int d = 0; d < D; d+=D_STEP){
// for each d I need dp[{d-1, d, d+1}, row-1, col],
dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3);
ind += (depth_dim_size * D_STEP);
current_ind += (depth_dim_size * D_STEP);
}
col -= 1;
if (col < 0) // wrap each thread around once it gets to the last column
col = n-2;
}
start_col -= blockDim.x;
}
} |
edf43971717f59a3839a0f6999aea0307f8cfdce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void addTwo(mint * A, mint length) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < length)
A[index] += 2;
}
| edf43971717f59a3839a0f6999aea0307f8cfdce.cu | __global__ void addTwo(mint * A, mint length) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < length)
A[index] += 2;
}
|
14134aab28a19688fb6293691268b25053b8610b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "hash_map_template.h"
using namespace CUDASTL;
__global__ void foo(HashMap<int,int> * h){
int i=get_thread_id();
// int i=threadIdx.x;
(*h)[i]=i*10;
}
__global__ void bar(HashMap<int,int> * h, int * arr){
// int i=get_thread_id();
// arr[i]=(*h)[i];
for(HashMap<int,int>::Iterator it=h->begin();it!=h->end();++it){
arr[it->key]=it->value;
}
}
const int numBlocks=10;
const int numThreads=10;
const int N=numThreads*numBlocks;
int main(){
int * d_arr;
hipMalloc((void **)&d_arr,sizeof(int)*N);
HashMap<int,int> * h=CreateHashMap<int,int,HashFunc<int> >(79, 2000);
hipLaunchKernelGGL(( foo), dim3(numBlocks),dim3(numThreads), 0, 0, h);
hipDeviceSynchronize();
// bar<<<numBlocks,numThreads>>>(h,d_arr);
hipLaunchKernelGGL(( bar), dim3(1),dim3(1), 0, 0, h,d_arr);
int * h_arr=(int *)malloc(sizeof(int)*N);
hipMemcpy(h_arr,d_arr,sizeof(int)*N,hipMemcpyDeviceToHost);
int i;
for(i=90;i<100;i++){
printf("%d\t",h_arr[i]);
if(i%8==7)
printf("\n");
}
printf("\n");
return 0;
}
| 14134aab28a19688fb6293691268b25053b8610b.cu | #include <cuda.h>
#include <stdio.h>
#include "hash_map_template.h"
using namespace CUDASTL;
__global__ void foo(HashMap<int,int> * h){
int i=get_thread_id();
// int i=threadIdx.x;
(*h)[i]=i*10;
}
__global__ void bar(HashMap<int,int> * h, int * arr){
// int i=get_thread_id();
// arr[i]=(*h)[i];
for(HashMap<int,int>::Iterator it=h->begin();it!=h->end();++it){
arr[it->key]=it->value;
}
}
const int numBlocks=10;
const int numThreads=10;
const int N=numThreads*numBlocks;
int main(){
int * d_arr;
cudaMalloc((void **)&d_arr,sizeof(int)*N);
HashMap<int,int> * h=CreateHashMap<int,int,HashFunc<int> >(79, 2000);
foo<<<numBlocks,numThreads>>> (h);
cudaThreadSynchronize();
// bar<<<numBlocks,numThreads>>>(h,d_arr);
bar<<<1,1>>>(h,d_arr);
int * h_arr=(int *)malloc(sizeof(int)*N);
cudaMemcpy(h_arr,d_arr,sizeof(int)*N,cudaMemcpyDeviceToHost);
int i;
for(i=90;i<100;i++){
printf("%d\t",h_arr[i]);
if(i%8==7)
printf("\n");
}
printf("\n");
return 0;
}
|
18bba040603d7900cedaa9f83fa4d01401dbdbae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PhotonProcess.h"
#include "ElectronProcess.h"
namespace vecphys {
inline namespace cuda {
__global__
void KernelPhotonProcess(Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* materialIndex)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
PhotonProcess process(devStates,tid,table);
while (tid < nTrackSize) {
process.GetStepLengthAndProcess<ScalarBackend>(itrack[tid], materialIndex[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__
void KernelElectronProcess(Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* materialIndex)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
ElectronProcess process(devStates,tid,table);
while (tid < nTrackSize) {
process.GetStepLengthAndProcess<ScalarBackend>(itrack[tid], materialIndex[tid]);
tid += blockDim.x * gridDim.x;
}
}
} // end namespace cuda
// Cuda wrapper
void CudaPhotonProcess(int blocksPerGrid,
int threadsPerBlock,
Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* targetElements)
{
hipLaunchKernelGGL(( vecphys::cuda::KernelPhotonProcess), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
devStates,table,nTrackSize,itrack,targetElements);
}
void CudaElectronProcess(int blocksPerGrid,
int threadsPerBlock,
Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* targetElements)
{
hipLaunchKernelGGL(( vecphys::cuda::KernelElectronProcess), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
devStates,table,nTrackSize,itrack,targetElements);
}
} // end namespace vecphys
| 18bba040603d7900cedaa9f83fa4d01401dbdbae.cu | #include "PhotonProcess.h"
#include "ElectronProcess.h"
namespace vecphys {
inline namespace cuda {
__global__
void KernelPhotonProcess(Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* materialIndex)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
PhotonProcess process(devStates,tid,table);
while (tid < nTrackSize) {
process.GetStepLengthAndProcess<ScalarBackend>(itrack[tid], materialIndex[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__
void KernelElectronProcess(Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* materialIndex)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
ElectronProcess process(devStates,tid,table);
while (tid < nTrackSize) {
process.GetStepLengthAndProcess<ScalarBackend>(itrack[tid], materialIndex[tid]);
tid += blockDim.x * gridDim.x;
}
}
} // end namespace cuda
// Cuda wrapper
void CudaPhotonProcess(int blocksPerGrid,
int threadsPerBlock,
Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* targetElements)
{
vecphys::cuda::KernelPhotonProcess<<<blocksPerGrid, threadsPerBlock>>>(
devStates,table,nTrackSize,itrack,targetElements);
}
void CudaElectronProcess(int blocksPerGrid,
int threadsPerBlock,
Random_t* devStates,
CrossSectionData* table,
int nTrackSize,
GUTrack* itrack,
int* targetElements)
{
vecphys::cuda::KernelElectronProcess<<<blocksPerGrid, threadsPerBlock>>>(
devStates,table,nTrackSize,itrack,targetElements);
}
} // end namespace vecphys
|
907459f772eddca55cff0311fd40503479ef5434.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/unique.h>
#include <thrust/count.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include "cnpy.h"
#include <cmath>
#include <stdlib.h>
#include <fstream>
#include <chrono>
using namespace std::chrono;
#include <iostream>
#include <sstream>
long long int ncells;
/*
------ Declarations of utility functions from utils.h -------
*/
cnpy::NpyArray read_velocity_field_data( std::string file_path_name, int* n_elements);
void define_xs_or_ys(float* xs, float dx, float x0, int gsize);
void save_master_Coos_to_file(std::string op_FnamePfx,std::string op_FnamePfx_2, int num_actions,
thrust::host_vector<long long int> &H_master_cooS1,
thrust::host_vector<long long int> &H_master_cooS2,
thrust::host_vector<float> &H_master_cooVal,
thrust::host_vector<float> &H_master_R,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs,
thrust::host_vector<float> &prob_params,
long long int* DP_relv_params,
unsigned long int num_DP_params);
// template<typename dType> template not working for thrust vectors
void print_device_vector(thrust::device_vector<long long int> &array, int start_id, int end_id, std::string array_name, std::string end, int method);
void make_dir(std::string dir_name);
void populate_ac_angles(float* ac_angles, int num_ac_angles);
void populate_ac_speeds(float* ac_speeds, int num_ac_speeds, float Fmax);
void populate_actions(float** H_actions, int num_ac_speeds, int num_ac_angles, float Fmax);
/*
----- Device functions -----
*/
__device__ int32_t get_thread_idx(){
// assigns idx to thread with which it accesses the flattened 3d vxrzns matrix
// for a given T and a given action.
// runs for both 2d and 3d grid
// TODO: may have to change this considering cache locality
// here i, j, k refer to a general matrix M[i][j][k]
int32_t i = threadIdx.x;
int32_t j = blockIdx.y;
int32_t k = blockIdx.x;
int32_t idx = k + (j*gridDim.x) + (i*gridDim.x*gridDim.y)+ blockIdx.z*blockDim.x*gridDim.x*gridDim.y;
return idx;
}
__device__ long long int state1D_from_spid(int32_t T, int32_t sp_id, long long int ncells){
// j ~ blockIdx.x
// i ~ blockIdx.y
    // The above three constitute a spatial state index from i and j of grid
// last term is for including time index as well.
// return value when full spatial grid was used
// return (blockIdx.x + (blockIdx.y*gridDim.x) + (T*gridDim.x*gridDim.y) );
// return value for chunks concept
return sp_id + (T*ncells);
}
__device__ long long int state1D_from_ij(int32_t* posid, int32_t T, int32_t gsize){
// posid = {i , j}
// state id = j + i*dim(i) + T*dim(i)*dim(j)
// return value when full spatial grid was used
// return (posid[1] + posid[0]*gridDim.x + (T*gridDim.x*gridDim.y) ) ;
// return value for chunks concept
return (posid[1] + posid[0]*gsize + (T*gsize*gsize)*1LL ) ;
}
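// e.g. (illustrative numbers): with gsize = 100, posid = {2, 5} and T = 3,
// the flattened state id is 5 + 2*100 + 3*100*100 = 30205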
__device__ int32_t get_rzn_id(){
return (blockIdx.z * blockDim.x) + threadIdx.x;
}
__device__ void get_posids_from_sp_id(long long int sp_id, int gsize, int32_t* posids){
posids[0] = sp_id/gsize;
posids[1] = sp_id%gsize;
return;
}
__device__ long long int get_sp_id(){
// sp_id: 1d spatial id ranging from 0 to ncells
int i = blockIdx.y;
int j = blockIdx.x;
long long int sp_id = j + (i*gridDim.x)*1LL;
return sp_id;
}
__device__ void get_posids_relS2_0(int32_t m, int32_t* posids_S1, int32_t* posids_relS2_0){
// m*m is size of neighbour grid
// returns i,j index of upper left corner of neighbour grid
int32_t i1 = posids_S1[0];
int32_t j1 = posids_S1[1];
int32_t del = (m-1)/2;
posids_relS2_0[0] = i1 - del;
posids_relS2_0[1] = j1 - del;
return;
}
__device__ long long int get_rel_sp_id2(int32_t m, int32_t* posids_S2, int32_t* posids_relS2_0){
// returns relative sp_id for S2 in neighbour grid
int32_t del_i = posids_S2[0] - posids_relS2_0[0]; // i2 - rel_i0
int32_t del_j = posids_S2[1] - posids_relS2_0[1]; // j2 - rel_j0
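    // e.g. (illustrative values): with m = 5, posids_relS2_0 = {10, 20} and posids_S2 = {12, 23},
    // del_i = 2 and del_j = 3, so rel_sp_id2 = 3 + 5*2 = 13; anything outside the m x m
    // neighbour grid is mapped to the sentinel value m*m below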
long long int rel_sp_id2;
    // if S2 is outside the neighbour grid, assign a special value to rel_sp_id2
    // this will help keep count of the no. of S2s that lie outside the neighbour grid
// ideally there should be 0 such S2s
if (del_i < m && del_j < m)
rel_sp_id2 = del_j + (m*del_i);
else
rel_sp_id2 = m*m;
return rel_sp_id2;
}
__device__ long long int get_sp_id2_from_rel_sp_id2(int32_t m, int32_t gsize,
long long int rel_sp_id2, int32_t* posids_relS2_0){
// returns Sp_id2 from rel_sp_id2
long long int sp_id2;
int32_t del_i = rel_sp_id2/m;
int32_t del_j = rel_sp_id2%m;
int32_t i2 = del_i + posids_relS2_0[0];
int32_t j2 = del_j + posids_relS2_0[1];
sp_id2 = j2 + gsize*i2;
return sp_id2;
}
__device__ bool is_edge_state(int32_t i, int32_t j){
// n = gsize -1 that is the last index of the domain assuming square domain
int32_t n = gridDim.x - 1;
if (i == 0 || i == n || j == 0 || j == n )
return true;
else
return false;
}
__device__ bool is_in_obstacle(int sp_id, int T, long long int ncells, int* all_mask_mat){
//returns true if obstacle is present in state T,i,j
long long int mean_id = state1D_from_spid(T, sp_id, ncells);
return(all_mask_mat[mean_id] == 1 );
}
__device__ bool is_terminal(int32_t i, int32_t j, float* params){
// terminal state indices (of UL corner of terminal subgrid if term_subgrid_size>1)
int32_t i_term = params[8];
int32_t j_term = params[9];
int tsgsize = params[12]; //term_subgrid_size
if( (i >= i_term && i < i_term + tsgsize) && (j >= j_term && j < j_term + tsgsize) )
return true;
else return false;
}
__device__ bool my_isnan(int s){
// By IEEE 754 rule, NaN is not equal to NaN
return s != s;
}
__device__ void get_xypos_from_ij(int32_t i, int32_t j, int32_t gsize ,float* xs, float* ys, float* x, float* y){
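    // row index i increases downward while y increases upward, hence the (gsize - 1 - i) flip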
*x = xs[j];
// *y = ys[gridDim.x - 1 - i];
*y = ys[gsize - 1 - i];
return;
}
__device__ long long int get_sp_id_from_posid(int32_t* posids, int32_t gsize){
// gives sp_id from posids (i,j)
return posids[1] + gsize*posids[0]*1LL ;
}
__device__ float get_angle_in_0_2pi(float theta){
float f_pi = 3.141592;
if (theta < 0)
return theta + (2*f_pi);
else
return theta;
}
/*
----- move() and reward_functions() moved to move_and_rewards.h/cu -----
*/
__device__ float calculate_one_step_reward(float ac_speed, float ac_angle, float rad1, float rad2, float* params){
int method = params[13];
float alpha = params[19];
    float Cr = 1;       // coefficient for radiation term
    float Cf = 1;       // coefficient for energy consumption
    float Ct = 0.01;      // small coefficient for time, to prevent a lazy start
float dt = params[4];
float energy_reward, time_reward;
if (method == 0) //time
return -dt;
else if (method == 1){ //energy1
return -(Cf*ac_speed*ac_speed + Ct)*dt;
}
else if (method == 2){ //energy2: maximise (collection-consumption)
return ((Cr*(rad2 + rad1)/2) - (Cf*ac_speed*ac_speed) - Ct)*dt;
}
else if (method == 3){
return ((Cr*(rad2 + rad1)/2)- Ct)*dt;
}
else if (method == 4){
energy_reward = -(ac_speed*ac_speed)*dt;
time_reward = -dt;
return alpha*energy_reward + (1-alpha)*time_reward;
}
else
return 0;
}
__device__ void move(float ac_speed, float ac_angle, float vx, float vy, int32_t T, float* xs, float* ys, int32_t* posids, float* params, float* r ){
int32_t gsize = params[0];
int32_t n = params[0] - 1; // gsize - 1
// int32_t num_actions = params[1];
// int32_t nrzns = params[2];
// float F = params[3];
int32_t nt = params[10];
float F = ac_speed;
float dt = params[4];
float r_outbound = params[5];
float r_terminal = params[6];
// int32_t nT = params[10];
float Dj = fabsf(xs[1] - xs[0]);
float Di = fabsf(ys[1] - ys[0]);
int32_t i0 = posids[0];
int32_t j0 = posids[1];
float vnetx = F*cosf(ac_angle) + vx;
float vnety = F*sinf(ac_angle) + vy;
float x, y;
get_xypos_from_ij(i0, j0, gsize, xs, ys, &x, &y); // x, y stores centre coords of state i0,j0
float xnew = x + (vnetx * dt);
float ynew = y + (vnety * dt);
// float r_step = 0;
    *r = 0; // initialise r with 0
if (xnew > xs[n])
{
xnew = xs[n];
*r += r_outbound;
}
else if (xnew < xs[0])
{
xnew = xs[0];
*r += r_outbound;
}
if (ynew > ys[n])
{
ynew = ys[n];
*r += r_outbound;
}
else if (ynew < ys[0])
{
ynew = ys[0];
*r += r_outbound;
}
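    // map the continuous position (xnew, ynew) back to the (row, col) indices of the nearest grid cell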
int32_t xind, yind;
float remx = remainderf((xnew - xs[0]), Dj);
float remy = remainderf(-(ynew - ys[n]), Di);
xind = ((xnew - xs[0]) - remx)/Dj;
yind = (-(ynew - ys[n]) - remy)/Di;
if ((remx >= 0.5 * Dj) && (remy >= 0.5 * Di))
{
xind += 1;
yind += 1;
}
else if ((remx >= 0.5 * Dj && remy < 0.5 * Di))
{
xind += 1;
}
else if ((remx < 0.5 * Dj && remy >= 0.5 * Di))
{
yind += 1;
}
if (!(my_isnan(xind) || my_isnan(yind)))
{
// update posids
posids[0] = yind;
posids[1] = xind;
if (is_edge_state(posids[0], posids[1])) //line 110
{
*r += r_outbound;
}
}
if (is_terminal(posids[0], posids[1], params))
{
*r += r_terminal;
}
else{
// //reaching any state in the last timestep which is not terminal is penalised
// if (T == nt-2)
// *r += r_outbound;
        //reaching any state in the last few timesteps which is not terminal is mildly penalised
if (T > nt-20)
*r += 0.05*r_outbound;
}
}
__device__ void extract_velocity(int32_t* posids, long long int sp_id, long long int ncells, float* vx, float* vy,
int32_t T, float* all_u_mat, float* all_v_mat, float* all_ui_mat,
float* all_vi_mat, float* all_Yi, float* params){
int32_t nrzns = params[2];
int32_t nmodes = params[7];
int32_t gsize = params[0];
long long int sp_uvi, str_uvi, sp_Yi; //startpoints and strides for accessing all_ui_mat, all_vi_mat and all_Yi
// int str_Yi;
float sum_x = 0;
float sum_y = 0;
float vx_mean, vy_mean;
//thread index. also used to access resultant vxrzns[nrzns, gsize, gsize]
int32_t idx = get_thread_idx();
//rzn index to identify which of the 5k rzn it is. used to access all_Yi.
int32_t rzn_id = get_rzn_id() ;
//mean_id is the index used to access the flattened all_u_mat[t,i,j].
long long int mean_id = state1D_from_spid(T, sp_id, ncells);
//to access all_ui_mat and all_vi_mat
//str_uvi = gridDim.x * gridDim.y;
// sp_uvi = (T * nmodes * str_uvi) + (gridDim.x * blockIdx.y) + (blockIdx.x);
str_uvi = gsize*gsize*1LL;
sp_uvi = (T * nmodes * str_uvi) + (gsize * posids[0]) + (posids[1]);
// to access all_Yi
sp_Yi = (T * nrzns * nmodes * 1LL) + (rzn_id * nmodes);
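    // the realisation's velocity below is the mean field plus a sum over nmodes spatial
    // modes, each weighted by this realisation's coefficient stored in all_Yi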
vx_mean = all_u_mat[mean_id];
for(int i = 0; i < nmodes; i++)
{
sum_x += all_ui_mat[sp_uvi + (i*str_uvi)]*all_Yi[sp_Yi + i];
}
vy_mean = all_v_mat[mean_id];
for(int i = 0; i < nmodes; i++)
{
sum_y += all_vi_mat[sp_uvi + (i*str_uvi)]*all_Yi[sp_Yi + i];
}
*vx = vx_mean + sum_x;
*vy = vy_mean + sum_y;
return;
}
__device__ void extract_radiation(long long int sp_id, int32_t T, long long int ncells,
float* all_s_mat, float* rad){
// for DETERMINISTIC radiation (scalar) field
// extract radiation (scalar) from scalar matrix
long long int mean_id = state1D_from_spid(T, sp_id, ncells);
*rad = all_s_mat[mean_id];
return;
}
__device__ bool is_within_band(int i, int j, int i1, int j1, int i2, int j2, float* xs, float* ys, int gsize){
    //returns true if i,j are within the band connecting cells i1,j1 and i2,j2
if(i1==i2 || j1==j2){
return true;
}
else{
float x, y, x1, y1, x2, y2;
float cell_diag = fabsf(xs[1]-xs[0])*1.414213;
get_xypos_from_ij(i, j, gsize, xs, ys, &x, &y); // x, y stores centre coords of state i0,j0
get_xypos_from_ij(i1, j1, gsize, xs, ys, &x1, &y1);
get_xypos_from_ij(i2, j2, gsize, xs, ys, &x2, &y2);
float A = (y2-y1)/(x2-x1);
float B = -1;
float C = y1 - (A*x1);
float dist_btw_pt_line = fabsf(A*x + B*y + C)/sqrtf((A*A) + (B*B));
if (dist_btw_pt_line < cell_diag)
return true;
else
return false;
}
}
__device__ bool goes_through_obstacle(long long int sp_id1, long long int sp_id2, int T,
long long int ncells, int* D_all_mask_mat,
float* xs, float* ys, float* params){
// returns true if the transition involves going through obstacle
bool possible_collision = false;
int posid1[2];
int posid2[2];
int gsize = params[0];
long long int sp_id;
get_posids_from_sp_id(sp_id1, gsize, posid1);
get_posids_from_sp_id(sp_id2, gsize, posid2);
int imin = min(posid1[0], posid2[0]);
int imax = max(posid1[0], posid2[0]);
int jmin = min(posid1[1], posid2[1]);
int jmax = max(posid1[1], posid2[1]);
for(int i=imin; i<=imax; i++){
for(int j=jmin; j<=jmax; j++){
if(!(i==posid1[0]&&j==posid1[1])){
sp_id = j + gsize*i*1LL ;
if ( is_in_obstacle(sp_id, T, ncells, D_all_mask_mat) || is_in_obstacle(sp_id, T+1, ncells, D_all_mask_mat)){
if (is_within_band(i, j, posid1[0], posid1[1], posid2[0], posid2[1], xs, ys, gsize) == true){
possible_collision = true;
return true;
}
}
}
}
}
return possible_collision;
}
//test: changed from float* to float ac_angle
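// transition_calc(): one thread per (spatial cell, realisation) at timestep T for a single action
// (ac_speed, ac_angle). Each thread samples its realisation's velocity, takes one step from S1 with
// move(), maps the resulting S2 to its relative index in the m x m neighbour grid, and atomically
// accumulates a count in results[sp_id*Nb + rel_sp_id2] (normalised to a probability later) and the
// immediate reward in sumR_sa[sp_id] (divided by nrzns later in compute_mean).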
__global__ void transition_calc(float* T_arr, long long int ncells,
float* all_u_mat, float* all_v_mat, float* all_ui_mat, float* all_vi_mat, float* all_Yi,
float* D_all_s_mat, int* D_all_mask_mat,
float ac_speed, float ac_angle, float* xs, float* ys, float* params, float* sumR_sa,
float* results){
// results directions - 1: along S2; 2: along S1; 3: along columns towards count
int32_t gsize = params[0]; // size of grid along 1 direction. ASSUMING square grid.
int32_t nrzns = params[2];
float r_outbound = params[5];
// int32_t is_stationary = params[11];
int32_t T = (int32_t)T_arr[0]; // current timestep
int32_t idx = get_thread_idx();
long long int res_idx;
float vx, vy, rad1, rad2;
long long int sp_id = get_sp_id(); //sp_id is space_id. S1%(gsize*gsize)
long long int sp_id2;
long long int rel_sp_id2;
int32_t posids_relS2_0[2];
int32_t posids_S1[2];
int32_t m = (int32_t) params[18];
int32_t Nb = (m*m) + 1;
float one = 1.0;
if(idx < gridDim.x*gridDim.y*nrzns && sp_id < ncells) //or idx < arr_size
{
// int32_t posids[2] = {(int32_t)blockIdx.y, (int32_t)blockIdx.x}; //static declaration of array of size 2 to hold i and j values of S1.
int32_t posids[2]; //static declaration of array of size 2 to hold i and j values of S1.
get_posids_from_sp_id(sp_id, gsize, posids);
get_posids_from_sp_id(sp_id, gsize, posids_S1);
int32_t rzn_id = get_rzn_id();
// After move() these will be overwritten by i and j values of S2
float r=0; // to store immediate reward
float r_step;
extract_velocity(posids, sp_id, ncells, &vx, &vy, T, all_u_mat, all_v_mat, all_ui_mat, all_vi_mat, all_Yi, params);
extract_radiation(sp_id, T, ncells, D_all_s_mat, &rad1);
// if s1 not terminal
if (is_terminal(posids[0], posids[1], params) == false){
// if s1 not in obstacle
if (is_in_obstacle(sp_id, T, ncells, D_all_mask_mat) == false){
// moves agent and adds r_outbound and r_terminal to r
move(ac_speed, ac_angle, vx, vy, T, xs, ys, posids, params, &r);
sp_id2 = get_sp_id_from_posid(posids, gsize);
extract_radiation(sp_id2, T+1, ncells, D_all_s_mat, &rad2);
// adds one step-reward based on method. the method id is available in params
r_step = calculate_one_step_reward(ac_speed, ac_angle, rad1, rad2, params);
r += r_step;
// if S2 is an obstacle cell. then penalise with r_outbound
// if (is_in_obstacle(sp_id2, T+1, ncells, D_all_mask_mat) == true )
// r = r_outbound;
if (goes_through_obstacle(sp_id, sp_id2, T, ncells, D_all_mask_mat, xs, ys, params) == true)
r = r_outbound;
}
// if s1 is in obstacle, then no update to posid
else
r = r_outbound;
}
get_posids_relS2_0(m, posids_S1, posids_relS2_0);
rel_sp_id2 = get_rel_sp_id2(m, posids, posids_relS2_0);
res_idx = sp_id*Nb + rel_sp_id2;
float b = atomicAdd(&results[res_idx], one);
//writing to sumR_sa. this array will later be divided by nrzns, to get the avg
float a = atomicAdd(&sumR_sa[sp_id], r);
__syncthreads();
}//if ends
return;
}
__global__ void compute_mean(float* D_master_sumRsa_arr, int size, int nrzns) {
// computes mean
int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
if (tid < size)
D_master_sumRsa_arr[tid] = D_master_sumRsa_arr[tid]/nrzns;
return;
}
__global__ void count_kernel(float* D_master_S2_arr_ip, int nrzns, unsigned long long int* num_uq_s2_ptr) {
// D_master_S2_arr_ip contains count of relS2s for S1s for a given action
// This kernel counts no. of nnz elements for a given S1
// This is needed for getting total nnz to initialise COO matrix
// ncells is gridDim, i.e. we have ncells blocks in grid
// Nb is blockDim, i.e we have Nb threads in block
int ncells = gridDim.x; // one block per spatial cell, so gridDim.x == ncells
int Nb = blockDim.x;
long long int tid = (blockIdx.x*Nb) + threadIdx.x;
int idx = blockIdx.x;
float nnz;
unsigned long long int one = 1;
if ((tid < ncells*Nb) && (threadIdx.x != Nb-1)){ // tid < Nb*ncells
if (D_master_S2_arr_ip[tid] != 0){
nnz = atomicAdd(&num_uq_s2_ptr[idx], one);
}
}
return;
}
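// reduce_kernel(): one thread per spatial cell (S1). It scans that cell's Nb-wide row of relative-S2
// counts, skips zeros, and writes one COO triplet (S1, S2, count/nrzns) per non-zero entry, packed
// contiguously at the output offset given by the exclusive prefix sum prSum_num_uq_s2_ptr.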
__global__ void reduce_kernel(float* D_master_S2_arr_ip, int t, int Nb, int m,
long long int ncells, int nrzns, int gsize,
long long int* D_coo_s1_arr, long long int* D_coo_s2_arr,
float* D_coo_cnt_arr, unsigned long long int* num_uq_s2_ptr, unsigned long long int* prSum_num_uq_s2_ptr){
long long int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
long long int start_idx = tid*Nb; // to access tid'th threads 0-pos in ip_arr
long long int n_uqs = num_uq_s2_ptr[tid]; //number of unique S2s for tid'th block
long long int op_st_id = prSum_num_uq_s2_ptr[tid]; //sum of number of unique S2s up to tid'th block. to access tid'th thread's 0-pos in op_arr
long long int ith_nuq = 0; //ranges from 0 to n_uqs , to index number between 0 and n_uqs
long long int rel_sp_id2;
long long int sp_id2;
long long int S2;
long long int sp_id1 = tid;
float count; // no. of realisations that landed on a given relative S2 (read from the master S2 count array)
int32_t posids_relS2_0[2];
int32_t posids_S1[2];
if (tid < ncells){
// int32_t s1 = (tid%ncells) + (t*ncells); // TODO:xxdone change this to nbe a function of a arguments: sp_id and t
long long int s1 = tid + (t*ncells);
for(long long int i = 0; i< n_uqs; i++)
D_coo_s1_arr[op_st_id + i] = s1;
get_posids_relS2_0(m, posids_S1, posids_relS2_0);
for(long long int i = 0; i< Nb-1; i++){
count = D_master_S2_arr_ip[start_idx + i];
if (count != 0){
rel_sp_id2 = i;
get_posids_from_sp_id(sp_id1, gsize, posids_S1);
get_posids_relS2_0(m, posids_S1, posids_relS2_0);
sp_id2 = get_sp_id2_from_rel_sp_id2(m, gsize,
rel_sp_id2, posids_relS2_0);
S2 = state1D_from_spid(t+1, sp_id2, ncells);
D_coo_s2_arr[op_st_id + ith_nuq] = S2; // store old_s2 value in the [.. + ith] position
D_coo_cnt_arr[op_st_id + ith_nuq] = count/nrzns; // store prob value in the [.. + ith] position
ith_nuq++; // increment i
}
}
}
return;
}
template<typename dType>
void print_array(dType* array, int num_elems,std::string array_name, std::string end){
std::cout << array_name << std::endl;
for(int i = 0; i < num_elems; i++)
std::cout << array[i] << " " << end;
std::cout << std::endl;
}
std::string get_prob_name(int num_ac_speeds, int num_ac_angles, int i_term, int j_term,
int tsg_size){
std::string s_n_ac_sp = std::to_string(num_ac_speeds);
std::string s_n_ac_ac = std::to_string(num_ac_angles);
std::string s_i = std::to_string(i_term);
std::string s_j = std::to_string(j_term);
std::string s_tsg = std::to_string(tsg_size);
std::string name = "a" + s_n_ac_sp + "x" + s_n_ac_ac + "_"
+ "i" + s_i + "_" "j" + s_j + "_"
+ "ref" + s_tsg;
return name;
}
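// build_sparse_transition_model_at_T_at_a(): for one timestep t and one action this
//   (i)   launches transition_calc over all cells and realisations,
//   (ii)  averages the accumulated rewards with compute_mean,
//   (iii) counts the non-zero S2s per S1 with count_kernel and exclusive-scans the counts,
//   (iv)  compacts the counts into COO (S1, S2, prob) arrays with reduce_kernel, and
//   (v)   appends the COO arrays and rewards to the per-action host vectors (concatenating across time).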
void build_sparse_transition_model_at_T_at_a(int t, int action_id, int bDimx, thrust::device_vector<float> &D_tdummy,
float* D_all_u_arr, float* D_all_v_arr, float* D_all_ui_arr,
float* D_all_vi_arr, float* D_all_yi_arr,
float* D_all_s_arr, int* D_all_mask_arr,
thrust::device_vector<float> &D_params,
thrust::host_vector<float> &H_params,
thrust::device_vector<float> &D_xs,
thrust::device_vector<float> &D_ys,
float** H_actions,
thrust::host_vector<int32_t> &H_coo_len_per_ac,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs
);
void build_sparse_transition_model_at_T_at_a(int t, int action_id, int bDimx, thrust::device_vector<float> &D_tdummy,
float* D_all_u_arr, float* D_all_v_arr, float* D_all_ui_arr,
float* D_all_vi_arr, float* D_all_yi_arr,
float* D_all_s_arr, int* D_all_mask_arr,
thrust::device_vector<float> &D_params,
thrust::host_vector<float> &H_params,
thrust::device_vector<float> &D_xs,
thrust::device_vector<float> &D_ys,
float** H_actions,
thrust::host_vector<int32_t> &H_coo_len_per_ac,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs
){
int gsize = (int) H_params[0];
int num_actions = (int)H_params[1];
int nrzns = (int) H_params[2];
int nt = (int) H_params[10];
int m = (int)H_params[18];
int Nb = (m*m) + 1; //+1 is to store no. of S2s not lying in neighbour_array. Ideally it should have 0
// raw pointer casts
float* D_T_arr = thrust::raw_pointer_cast(&D_tdummy[0]);
float* xs = thrust::raw_pointer_cast(&D_xs[0]);
float* ys = thrust::raw_pointer_cast(&D_ys[0]);
float* params = thrust::raw_pointer_cast(&D_params[0]);
//Define Kernel launch parameters for transition calculation kernel
int DimGrid_z = (nrzns/bDimx)+1;
if (nrzns % bDimx == 0)
DimGrid_z = (nrzns/bDimx);
// initialise master S2 array
thrust::device_vector<float> D_master_S2_vector(ncells * Nb, 0);
float* D_master_S2_arr = thrust::raw_pointer_cast(&D_master_S2_vector[0]);
// initialise master sum_Rsa array - sumRsa's
// Important to initialise it with 0
thrust::device_vector<float> D_master_sumRsa_vector(ncells, 0);
float* D_master_sumRsa_arr = thrust::raw_pointer_cast(&D_master_sumRsa_vector[0]);
// define kernel block and grid configuration
dim3 DimGrid(gsize, gsize, DimGrid_z);
dim3 DimBlock(bDimx, 1, 1);
float ac_speed = H_actions[action_id][0];
float ac_angle = H_actions[action_id][1];
// launch kernel for @a @t
hipLaunchKernelGGL(( transition_calc), dim3(DimGrid), dim3(DimBlock) , 0, 0, D_T_arr,
ncells, D_all_u_arr, D_all_v_arr, D_all_ui_arr, D_all_vi_arr, D_all_yi_arr,
D_all_s_arr, D_all_mask_arr,
ac_speed, ac_angle, xs, ys, params, D_master_sumRsa_arr,
D_master_S2_arr);
hipDeviceSynchronize();
// checks
// std::cout << "D_xs= " << std::endl;
// for (int i = 0; i< 10; i++)
// std::cout << D_xs[i] << " " ;
// if (t == nt-2){
// std::cout << "t = " << t << "\n nt = " << nt << "\n" ;
// std::cout<<"gisze= " << gsize << std::endl;
// std::cout<<"DimGrid_z = " << DimGrid_z << std::endl;
// std::cout<<"bDimx = " << bDimx << std::endl;
// }
// // CHECK copy data back to host for check
// std::cout << "a" << n <<"\n vx at s1=0: " << D_params[31] << std::endl;
// std::cout <<"\n vx at s1=0: " << D_params[30] << std::endl;
// std::cout << "----a" << n <<"\n";
// std::cout <<"pre move " << "\n";
// std::cout<<"r1\n"<< D_params[23] << "," << D_params[24] << std::endl;
// std::cout<<"r2\n"<< D_params[25] << "," << D_params[26] << std::endl;
// std::cout <<"post move " << "\n";
// std::cout<<"r1\n"<< D_params[27] << "," << D_params[28] << std::endl;
// std::cout<<"r2\n"<< D_params[29] << "," << D_params[30] << std::endl;
// thrust::copy(D_master_S2_vector.begin() + n*arr_size, D_master_S2_vector.begin() + (n+1)*arr_size, H_S2_vec.begin());
// thrust::copy(D_master_sumRsa_vector.begin() + n*ncells, D_master_sumRsa_vector.begin() + (n+1)*ncells, H_sumR_sa.begin());
// std::cout << "post kernel" << std::endl;
// for(int i = 0; i < 10; i ++)
// std::cout << H_sumR_sa[i] << std::endl;
// for(int i = 0; i < 10; i ++)
// std::cout << H_S2_vec[i] << std::endl;
int Nthreads = D_master_sumRsa_vector.size();
assert(Nthreads == ncells);
int threads_per_block = 64;
int blocks_per_grid = (Nthreads/threads_per_block) + 1;
assert( blocks_per_grid * threads_per_block >= Nthreads);
hipLaunchKernelGGL(( compute_mean), dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, D_master_sumRsa_arr, Nthreads, nrzns);
// TODO: in optimisation phase move this line after initialisation of num_uq_S2 vectors.
// hipDeviceSynchronize();
//initialising vectors for counting nnzs or number of unique S2s for S1s
thrust::device_vector<unsigned long long int> D_num_uq_s2(ncells,0);
thrust::device_vector<unsigned long long int> D_prSum_num_uq_s2(ncells);
unsigned long long int* num_uq_s2_ptr = thrust::raw_pointer_cast(&D_num_uq_s2[0]);
unsigned long long int* prSum_num_uq_s2_ptr = thrust::raw_pointer_cast(&D_prSum_num_uq_s2[0]);
//one thread per element
// count no. of unique S2s for each S1 and fill in num_uq_s2
hipLaunchKernelGGL(( count_kernel), dim3(ncells), dim3(Nb), 0, 0, D_master_S2_arr, nrzns, num_uq_s2_ptr);
hipDeviceSynchronize();
//CHECKs
// std::cout << "D_num_uq_s2_pc\n";
// int tempflag = 0;
// int tempnum;
// int cnt2 = 0;
// int cnt1 = 0;
// for (int i =0; i < efCszNa; i++){
// tempnum = D_num_uq_s2_pc[i];
// if (tempnum == 1)
// cnt1++;
// else if (tempnum == 2)
// cnt2++;
// else
// std::cout << " --------------------------- WRONG-----------\n";
// }
// std::cout << "cnt1 = " << cnt1 << "\ncnt2 = " << cnt2 <<"\n";
// calc nnz: number of non zero elements(or unique S2s) for a given S1 and action
long long int nnz = thrust::reduce(D_num_uq_s2.begin(), D_num_uq_s2.end(), 0LL, thrust::plus<long long int>()); // accumulate in integer arithmetic to avoid float rounding for large counts
// get prefix sum of D_num_uq_s2. This helps threads to access apt COO indices in reduce_kernel
thrust::exclusive_scan(D_num_uq_s2.begin(), D_num_uq_s2.end(), D_prSum_num_uq_s2.begin());
// std::cout << "nnz = " << nnz<< "\n";
//initialise coo arrays (concatenated across actions)
thrust::device_vector<long long int> D_coo_s1(nnz);
thrust::device_vector<long long int> D_coo_s2(nnz);
thrust::device_vector<float> D_coo_count(nnz); // TODO: make this int32_t and introduce another array for prob
long long int* D_coo_s1_arr = thrust::raw_pointer_cast(&D_coo_s1[0]);
long long int* D_coo_s2_arr = thrust::raw_pointer_cast(&D_coo_s2[0]);
float* D_coo_cnt_arr = thrust::raw_pointer_cast(&D_coo_count[0]);
Nthreads = ncells;
assert(Nthreads == ncells);
threads_per_block = 64;
blocks_per_grid = (Nthreads/threads_per_block) + 1;
// reduce operation to fill COO arrays
hipLaunchKernelGGL(( reduce_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, D_master_S2_arr, t, Nb, m,
ncells, nrzns, gsize, D_coo_s1_arr, D_coo_s2_arr, D_coo_cnt_arr,
num_uq_s2_ptr, prSum_num_uq_s2_ptr);
hipDeviceSynchronize();
// nnz should be filled in a global array
H_coo_len_per_ac[action_id] = nnz;
// Copy Device COO results to Host COO vectors across actions and append vectors across time
assert(action_id >=0);
H_Aarr_of_cooS1[action_id].insert(H_Aarr_of_cooS1[action_id].end(), D_coo_s1.begin(), D_coo_s1.end());
H_Aarr_of_cooS2[action_id].insert(H_Aarr_of_cooS2[action_id].end(), D_coo_s2.begin(), D_coo_s2.end());
H_Aarr_of_cooProb[action_id].insert(H_Aarr_of_cooProb[action_id].end(), D_coo_count.begin(), D_coo_count.end());
H_Aarr_of_Rs[action_id].insert(H_Aarr_of_Rs[action_id].end(), D_master_sumRsa_vector.begin(), D_master_sumRsa_vector.end());
//checks
// print_device_vector(D_coo_s1, 0, 10, "D_coo_s1", " ", 0);
// print_device_vector(D_coo_s2, 0, 10, "D_coo_s2", " ", 0);
// std::cout << "H_coo_len_per_ac" << std::endl;
// for (int n = 0; n < num_actions; n++)
// std::cout << H_coo_len_per_ac[n] << std::endl;
// std::cout << "H_Aarr_of_cooS1" << std::endl;
// for (int n = 0; n < num_actions; n++){
// for (int i = 0; i < H_Aarr_of_cooS1[n].size(); i++)
// std::cout << H_Aarr_of_cooS1[n][i] << " , " << H_Aarr_of_cooS2[n][i] << " , " << H_Aarr_of_cooProb[n][i] << std::endl;
// std::cout << std::endl;
// }
// std::cout << "H_Aarr_of_Rs" << std::endl;
// for (int n = 0; n < num_actions; n++){
// for (int i = 0; i < ncells; i++)
// std::cout << H_Aarr_of_Rs[n][i] << std::endl;
// std::cout << std::endl;
// }
// // array of num_actions device_vectors for sum_Rsa_vec
// // initialisation with 0 is important because values are added to this
// thrust::host_vector<float> H_arr_sumR_sa[num_actions];
// for(int n = 0; n < num_actions; n++){
// H_arr_sumR_sa[n] = thrust::host_vector<float>(nnz[i]);
}
int get_reward_type(std::string prob_type){
// returns
// 0 for time
// 1 for energy1
// 2 for energy2
// 3 for energy3
// 4 for custom1
if (prob_type == "time")
return 0;
else if (prob_type == "energy1")
return 1;
else if (prob_type == "energy2")
return 2;
else if (prob_type == "energy3")
return 3;
else if (prob_type == "custom1")
return 4;
else
return -1;
}
// ------------------------------- MAIN () ---------------------------------
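// main(): pulls the problem settings from input_to_build_model.h and the velocity / radiation /
// obstacle fields from .npy files, fills the params vector, copies everything to the device, then
// loops over timesteps and actions calling build_sparse_transition_model_at_T_at_a to assemble the
// sparse transition model, and finally writes the concatenated COO arrays and rewards via
// save_master_Coos_to_file.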
int main(int argc, char *argv[]){
// add input argument part
// -------------------- input data starts here ---------------------------------
#include "input_to_build_model.h"
float alpha = alpha_header;
if(argc>1){
std::cout << alpha << " and " << argv[0] << " and " << argv[1] << "\n";
alpha = std::stof(argv[1]);
alpha = alpha/100;
// std::stringstream convert{ argv[2] };
// if(!(convert >> alpha)) alpha = 0;
std::cout << alpha << "\n";
}
std::cout << argc << "\n";
if(argc > 1) // argv[1] is only valid when an argument was actually passed
    std::cout << alpha << " and " << argv[0] << " and " << argv[1] << "\n";
int reward_type = get_reward_type(prob_type);
std::cout << "Reward type: " << reward_type << "\n";
// define full problem name and print them to a temporary file
// the temp file will be read by python scripts for conversion
std::string prob_specs = get_prob_name(num_ac_speeds, num_ac_angles, i_term,
j_term, term_subgrid_size);
std::string op_Fname_upto_prob_name = "data_modelOutput/" + prob_type + "/"
+ prob_name + "/" ;
std::string op_FnamePfx = op_Fname_upto_prob_name + prob_specs + "/"; //path for storing op npy data.
std::string op_Fname_withAlpha = op_FnamePfx + std::to_string(alpha) +"/";
make_dir(op_Fname_upto_prob_name);
make_dir(op_FnamePfx);
if(reward_type>3){
op_FnamePfx = op_Fname_withAlpha;
make_dir(op_FnamePfx);
}
std::ofstream fout("temp_modelOp_dirName.txt");
fout << prob_type << "\n";
fout << prob_name << "\n";
fout << prob_specs << "\n";
fout << std::to_string(alpha) << "\n";
fout << op_FnamePfx;
fout.close();
// TODO: Make sure files are stored in np.float32 format
std::string data_path = "data_input/" + prob_name + "/";
std::string all_u_fname = data_path + "all_u_mat.npy";
std::string all_v_fname = data_path + "all_v_mat.npy";
std::string all_ui_fname = data_path + "all_ui_mat.npy";
std::string all_vi_fname = data_path + "all_vi_mat.npy";
std::string all_yi_fname = data_path + "all_Yi.npy";
std::string all_s_fname = data_path + "all_s_mat.npy";
std::string all_mask_fname = data_path + "obstacle_mask.npy"; //this file stored in int32
// -------------------- input data ends here ---------------------------------
// make directory for storing output data from this file
make_dir(op_Fname_upto_prob_name);
make_dir(op_FnamePfx);
int all_u_n_elms;
int all_v_n_elms;
int all_ui_n_elms;
int all_vi_n_elms;
int all_yi_n_elms;
int all_s_n_elms;
int all_mask_n_elms;
cnpy::NpyArray all_u_cnpy = read_velocity_field_data(all_u_fname, &all_u_n_elms);
cnpy::NpyArray all_v_cnpy = read_velocity_field_data(all_v_fname, &all_v_n_elms);
cnpy::NpyArray all_ui_cnpy = read_velocity_field_data(all_ui_fname, &all_ui_n_elms);
cnpy::NpyArray all_vi_cnpy = read_velocity_field_data(all_vi_fname, &all_vi_n_elms);
cnpy::NpyArray all_yi_cnpy = read_velocity_field_data(all_yi_fname, &all_yi_n_elms);
cnpy::NpyArray all_s_cnpy = read_velocity_field_data(all_s_fname, &all_s_n_elms);
cnpy::NpyArray all_mask_cnpy = read_velocity_field_data(all_mask_fname, &all_mask_n_elms);
float* all_u_mat = all_u_cnpy.data<float>();
float* all_v_mat = all_v_cnpy.data<float>();
float* all_ui_mat = all_ui_cnpy.data<float>();
float* all_vi_mat = all_vi_cnpy.data<float>();
float* all_yi_mat = all_yi_cnpy.data<float>();
float* all_s_mat = all_s_cnpy.data<float>();
int* all_mask_mat = all_mask_cnpy.data<int>();
// CHECKS:
// print_array<float>(all_u_mat, all_u_n_elms, "all_u_mat", " ");
// print_array<float>(all_ui_mat, all_ui_n_elms,"all_ui_mat", " ");
// print_array<float>(all_yi_mat, all_yi_n_elms,"all_yi_mat", " ");
std::cout << "Finished reading Velocity Field Data !" << std::endl;
assert(neighb_gsize <= gsize);
assert((int)neighb_gsize%2 == 1); // neighb_gsize must be odd
//TODO: fill params in a function
// Contains implicit casting from int32_t to float
thrust::host_vector<float> H_params(32);
H_params[0] = gsize;
H_params[1] = num_actions;
H_params[2] = nrzns;
H_params[3] = F;
H_params[4] = dt;
H_params[5] = r_outbound;
H_params[6] = r_terminal;
H_params[7] = nmodes;
H_params[8] = i_term;
H_params[9] = j_term;
H_params[10] = nt;
H_params[11] = is_stationary;
H_params[12] = term_subgrid_size;
H_params[13] = reward_type;
H_params[14] = num_ac_speeds;
H_params[15] = num_ac_angles;
H_params[16] = dx;
H_params[17] = dy;
H_params[18] = neighb_gsize; // referred to as m in functions
H_params[19] = alpha;
for( int i =20; i<32; i++)
H_params[i] = z;
// Define grid ticks in host
thrust::host_vector<float> H_xs(gsize, -1);
thrust::host_vector<float> H_ys(gsize, -1);
float* xs = thrust::raw_pointer_cast(&H_xs[0]);
float* ys = thrust::raw_pointer_cast(&H_ys[0]);
define_xs_or_ys(xs, dx, x0, gsize);
define_xs_or_ys(ys, dy, y0, gsize);
// define actions (speed, angle pairs) in host
float** H_actions = new float*[num_actions];
for(int i=0; i<num_actions; i++)
H_actions[i] = new float[2];
populate_actions(H_actions, num_ac_speeds, num_ac_angles, F);
// std::cout << "CHECK: ACTIONS: \n";
// for(int i=0; i<num_actions; i++){
// std::cout << H_actions[i][0] << ", " << H_actions[i][1] << "\n";
// }
//----- start copying data to device --------
// Copy vel field data to device memory using thrust device_vector
thrust::device_vector<float> D_all_u_vec (all_u_mat, all_u_mat + all_u_n_elms);
thrust::device_vector<float> D_all_v_vec (all_v_mat, all_v_mat + all_v_n_elms);
thrust::device_vector<float> D_all_ui_vec (all_ui_mat, all_ui_mat + all_ui_n_elms);
thrust::device_vector<float> D_all_vi_vec (all_vi_mat, all_vi_mat + all_vi_n_elms);
thrust::device_vector<float> D_all_yi_vec (all_yi_mat, all_yi_mat + all_yi_n_elms);
thrust::device_vector<float> D_all_s_vec (all_s_mat, all_s_mat + all_s_n_elms);
thrust::device_vector<int> D_all_mask_vec (all_mask_mat, all_mask_mat + all_mask_n_elms);
float* D_all_u_arr = thrust::raw_pointer_cast(&D_all_u_vec[0]);
float* D_all_v_arr = thrust::raw_pointer_cast(&D_all_v_vec[0]);
float* D_all_ui_arr = thrust::raw_pointer_cast(&D_all_ui_vec[0]);
float* D_all_vi_arr = thrust::raw_pointer_cast(&D_all_vi_vec[0]);
float* D_all_yi_arr = thrust::raw_pointer_cast(&D_all_yi_vec[0]);
float* D_all_s_arr = thrust::raw_pointer_cast(&D_all_s_vec[0]);
int* D_all_mask_arr = thrust::raw_pointer_cast(&D_all_mask_vec[0]);
std::cout << "Copied to Device : Velocity Field Data !" << std::endl;
thrust::device_vector<float> D_tdummy(2,0);
// initialise empty device vectors. These contain time-invariant data
thrust::device_vector<float> D_params(32);
thrust::device_vector<float> D_xs(gsize);
thrust::device_vector<float> D_ys(gsize);
// initialise reusable host vectors
thrust::host_vector<int32_t> H_coo_len_per_ac(num_actions);
thrust::host_vector<long long int> H_Aarr_of_cooS1[(int)num_actions];
thrust::host_vector<long long int> H_Aarr_of_cooS2[(int)num_actions];
thrust::host_vector<float> H_Aarr_of_cooProb[(int)num_actions];
thrust::host_vector<float> H_Aarr_of_Rs[(int)num_actions];
//initialised with 0 size. later data from device is inserted/appended to the end of vector
for (int i =0; i < num_actions; i++){
H_Aarr_of_cooS1[i] = thrust::host_vector<long long int> (0);
}
for (int i =0; i < num_actions; i++){
H_Aarr_of_cooS2[i] = thrust::host_vector<long long int> (0);
}
for (int i =0; i < num_actions; i++){
H_Aarr_of_cooProb[i] = thrust::host_vector<float> (0);
}
for (int i =0; i < num_actions; i++){
H_Aarr_of_Rs[i] = thrust::host_vector<float> (0);
}
// assign value to global variable
ncells = gsize*gsize;
// copy data from host to device
D_params = H_params;
D_xs = H_xs;
D_ys = H_ys;
// run time loop and compute transition data for each time step
auto start = high_resolution_clock::now();
auto end = high_resolution_clock::now();
auto duration_t = duration_cast<microseconds>(end - start);
//IMP: Run time loop till nt-1. There are no S2s for S1s in the last timestep
for(int t = 0; t < nt-1; t++){
std::cout << "*** Computing data for timestep, T = " << t << std::endl;
D_tdummy[0] = t;
start = high_resolution_clock::now();
for(int action_id = 0; action_id < num_actions; action_id++){
// std::cout << " * action_id= " << action_id;
// this function also concats coos across time.
build_sparse_transition_model_at_T_at_a(t, action_id, bDimx, D_tdummy, D_all_u_arr, D_all_v_arr,
D_all_ui_arr, D_all_vi_arr, D_all_yi_arr,
D_all_s_arr, D_all_mask_arr,
D_params, H_params, D_xs, D_ys, H_actions,
H_coo_len_per_ac,
H_Aarr_of_cooS1, H_Aarr_of_cooS2, H_Aarr_of_cooProb,
H_Aarr_of_Rs);
// output_data )
}
end = high_resolution_clock::now();
std::cout << std::endl ;
duration_t = duration_cast<microseconds>(end - start);
std::cout << "duration@t = "<< duration_t.count()/1e6 << "sec" << std::endl;
std::cout << 3*H_Aarr_of_cooS1[0].size()*4*1e-6 << " MB" << std::endl;
std::cout << std::endl << std::endl;
}
// append R entries of each action for the last time step (filled with 0 here; the master R array
// below is pre-initialised with -99999).
// this has to be done separately because the above loop runs till nt-1.
/*
TODO: 1. Verify rewards as last time step
*/
thrust::host_vector<float> H_rewards_at_end_t(ncells, 0);
for (int i =0; i < num_actions; i++){
H_Aarr_of_Rs[i].insert(H_Aarr_of_Rs[i].end(), H_rewards_at_end_t.begin(), H_rewards_at_end_t.end());
}
//Check
// for (int i =0; i < num_actions; i++)
// std::cout << H_Aarr_of_Rs[i].size() << " ";
// find nnz per action
thrust::host_vector<long long int> H_master_PrSum_nnz_per_ac(num_actions);
long long int DP_relv_params[2] = {ncells*nt, num_actions*1LL};
long long int master_nnz = 0; //running sum of nnz going across actions
// calculate inclusive prefix sum of nnz's across actions
// will be used to access indices while concatenating results across actions
for(int i = 0; i < num_actions; i++){
master_nnz += H_Aarr_of_cooS1[i].size();
H_master_PrSum_nnz_per_ac[i] = master_nnz;
}
unsigned long int num_DP_params = sizeof(DP_relv_params) / sizeof(DP_relv_params[0]);
// print_array<long long int>(DP_relv_params, 2, "DP_relv_params", " ");
// std::cout << "chek num = " << sizeof(DP_relv_params) << std::endl;
// std::cout << "chek denom = " << sizeof(DP_relv_params[0]) << std::endl;
//checks
// std::cout << "total/master_nnz = " << master_nnz << std::endl;
// std::cout << "H_Aarr_of_cooS1[i].size()" << std::endl;
// for(int i = 0; i < num_actions; i++)
// std::cout << H_Aarr_of_cooS1[i].size() << std::endl;
// print_array<long long int>(&H_Aarr_of_cooS2[0][0], 10, "H_Aarr_of_cooS2[0]", " ");
// save final coo data
thrust::host_vector<long long int> H_master_cooS1(master_nnz);
thrust::host_vector<long long int> H_master_cooS2(master_nnz);
thrust::host_vector<float> H_master_cooVal(master_nnz);
thrust::host_vector<float> H_master_R(ncells*nt*num_actions, -99999); //TODO: verify -99999
std::string op_FnamePfx_2 = "data_solverOutput/" + prob_type + "/"
+ prob_name + "/" + prob_specs + "/";
std::string op_Fname_withAlpha_2 = "data_solverOutput/" + prob_type + "/"
+ prob_name + "/" + prob_specs + std::to_string(alpha) +"/";
if(reward_type>3){
op_FnamePfx_2 = op_Fname_withAlpha_2;
}
save_master_Coos_to_file(op_FnamePfx,op_FnamePfx_2, num_actions,
H_master_cooS1,
H_master_cooS2,
H_master_cooVal,
H_master_R,
H_Aarr_of_cooS1,
H_Aarr_of_cooS2,
H_Aarr_of_cooProb,
H_Aarr_of_Rs,
H_params,
DP_relv_params,
num_DP_params);
return 0;
}
//------------ main ends here ------------------------------------------
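// save_master_Coos_to_file(): concatenates the per-action COO vectors into the master arrays,
// shifting row indices per action so that S1(t,i,j,a) = S1(t,i,j) + a*n_states with n_states = ncells*nt,
// and writes everything out with cnpy::npy_save.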
void save_master_Coos_to_file(std::string op_FnamePfx, std::string op_FnamePfx_2, int num_actions,
thrust::host_vector<long long int> &H_master_cooS1,
thrust::host_vector<long long int> &H_master_cooS2,
thrust::host_vector<float> &H_master_cooVal,
thrust::host_vector<float> &H_master_R,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs,
thrust::host_vector<float> &prob_params,
long long int* DP_relv_params,
unsigned long int num_DP_params){
// assembles the master COO row idx, col idx and value arrays
// copies from each action vector to a master vector
// master_coo vectors are a concatenation first across time, then across actions
// ALSO, MODIFIES S1(t,i,j) to S1(t,i,j,a)
unsigned long long int master_nnz = H_master_cooS1.size();
unsigned long long int prob_params_size = prob_params.size();
long long int m_idx = 0;
long long int n_states = DP_relv_params[0]; // ncells*nt; kept 64-bit so the per-action shift below cannot overflow
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_cooS1[i].size(); j++){
// TODO: modify to include actions
H_master_cooS1[m_idx] = H_Aarr_of_cooS1[i][j] + i*n_states;
m_idx++;
}
}
m_idx = 0;
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_cooS2[i].size(); j++){
H_master_cooS2[m_idx] = H_Aarr_of_cooS2[i][j];
m_idx++;
}
}
m_idx = 0;
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_cooProb[i].size(); j++){
H_master_cooVal[m_idx] = H_Aarr_of_cooProb[i][j];
m_idx++;
}
}
m_idx = 0;
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_Rs[i].size(); j++){
H_master_R[m_idx] = H_Aarr_of_Rs[i][j];
m_idx++;
}
}
std::cout << "check num_DP_params = " << num_DP_params << std::endl;
std::cout << "op_FnamePfx= " << op_FnamePfx << "\n";
cnpy::npy_save(op_FnamePfx + "master_cooS1.npy", &H_master_cooS1[0], {master_nnz,1},"w");
cnpy::npy_save(op_FnamePfx + "master_cooS2.npy", &H_master_cooS2[0], {master_nnz,1},"w");
cnpy::npy_save(op_FnamePfx + "master_cooVal.npy", &H_master_cooVal[0], {master_nnz,1},"w");
cnpy::npy_save(op_FnamePfx + "master_R.npy", &H_master_R[0], {H_master_R.size(),1},"w");
cnpy::npy_save(op_FnamePfx + "DP_relv_params.npy", &DP_relv_params[0], {num_DP_params,1},"w");
cnpy::npy_save(op_FnamePfx + "prob_params.npy", &prob_params[0], {prob_params_size,1},"w");
// cnpy::npy_save(op_FnamePfx_2 + "prob_params.npy", &prob_params[0], {prob_params_size,1},"w");
std::cout << "saved files \n" ;
}
cnpy::NpyArray read_velocity_field_data( std::string file_path_name, int* n_elements){
// reads numpy file from input and
// returns cnpy::NpyArray structure and also fills in num_elements in the passed reference n_elements
// extraction in main: float* vel_data = arr.data<float>();
// TODO: make it general. currently hard-coded for float arrays.
//print filename
std::cout << "file path and name: " << file_path_name << std::endl;
cnpy::NpyArray arr = cnpy::npy_load(file_path_name);
//prints for checks
int dim = arr.shape.size();
int num_elements = 1;
std::cout << "shape: " ;
for (int i = 0; i < dim; i++){
std::cout << arr.shape[i] << " , " ;
num_elements = num_elements*arr.shape[i];
}
*n_elements = num_elements;
std::cout << std::endl << "num_elements: " << num_elements << std::endl;
float* vel_data = arr.data<float>();
// print check first 10 elements
std::cout << "First 10 elements of loaded array are: " << std::endl;
for (int i = 0; i < 10; i++)
std::cout << vel_data[i] << " " ;
std::cout << std::endl << std::endl;
return arr;
}
/*
--- print_device_vector() ---
IMP: datatype has to be explicitly changed in this function
*/
// template<typename dType>
void print_device_vector( thrust::device_vector<long long int> &array, int start_id, int end_id, std::string array_name, std::string end, int method){
std::cout << array_name << " from id " << start_id << " to " << end_id << std::endl;
if (method == 1){
float temp = -10000000;
for(int i = start_id; i < end_id; i++){
if (array[i] != temp){
std::cout << i << "\n";
std::cout << array[i] << " " << end;
std::cout << "\n";
temp = array[i];
}
}
}
else if (method == 0){
for(int i = start_id; i < end_id; i++)
std::cout << array[i] << " " << end;
}
else
std::cout << "Invalid input for argument: method";
std::cout << std::endl;
}
void make_dir(std::string dir_name){
int mkdir_status;
std::string comm_mkdir = "mkdir ";
std::string str = comm_mkdir + dir_name;
const char * full_command = str.c_str();
mkdir_status = system(full_command);
std::cout << "mkdir_status = " << mkdir_status << std::endl;
}
void define_xs_or_ys(float* xs, float dx, float x0, int gsize){
for(int i = 0; i < gsize; i++)
xs[i] = x0 + i*dx;
}
void populate_ac_angles(float* ac_angles, int num_ac_angles){
//fills array with equally spaced angles in radians
for (int i = 0; i < num_ac_angles; i++)
ac_angles[i] = i*(2*M_PI)/num_ac_angles;
return;
}
void populate_ac_speeds(float* ac_speeds, int num_ac_speeds, float Fmax){
//fills array with ac_speeds
// std::cout << "infunc CHeck- num_ac_speeds = " << num_ac_speeds << "\n";
float delF = 0;
if (num_ac_speeds == 1)
ac_speeds[0] = Fmax;
else if (num_ac_speeds > 1){
// -----include 0 speed
// delF = Fmax/(num_ac_speeds-1);
// for(int i = 0; i<num_ac_speeds; i++)
// ac_speeds[i] = i*delF;
// ------exclude 0 speed
delF = Fmax/(num_ac_speeds);
for(int i = 0; i<num_ac_speeds; i++){
ac_speeds[i] = (i+1)*delF;
// std::cout << ac_speeds[i] << "\n";
}
}
else
std::cout << "Invalid num_ac_speeds\n";
return;
}
void populate_actions(float **H_actions, int num_ac_speeds, int num_ac_angles, float Fmax){
// populates 2d vector with possible actions
float* ac_angles = new float[num_ac_angles];
populate_ac_angles(ac_angles, num_ac_angles);
float* ac_speeds = new float[num_ac_speeds];
populate_ac_speeds(ac_speeds, num_ac_speeds, Fmax);
int idx;
for (int i=0; i<num_ac_speeds; i++){
for(int j=0; j<num_ac_angles; j++){
idx = j + num_ac_angles*i;
// std::cout << ac_speeds[i] << "\n";
H_actions[idx][0] = ac_speeds[i];
H_actions[idx][1] = ac_angles[j];
}
}
return;
} | 907459f772eddca55cff0311fd40503479ef5434.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/unique.h>
#include <thrust/count.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include "cnpy.h"
#include <cmath>
#include <stdlib.h>
#include <fstream>
#include <chrono>
using namespace std::chrono;
#include <iostream>
#include <sstream>
long long int ncells;
/*
------ Declarations of utility functions from utils.h -------
*/
cnpy::NpyArray read_velocity_field_data( std::string file_path_name, int* n_elements);
void define_xs_or_ys(float* xs, float dx, float x0, int gsize);
void save_master_Coos_to_file(std::string op_FnamePfx,std::string op_FnamePfx_2, int num_actions,
thrust::host_vector<long long int> &H_master_cooS1,
thrust::host_vector<long long int> &H_master_cooS2,
thrust::host_vector<float> &H_master_cooVal,
thrust::host_vector<float> &H_master_R,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs,
thrust::host_vector<float> &prob_params,
long long int* DP_relv_params,
unsigned long int num_DP_params);
// template<typename dType> template not working for thrust vectors
void print_device_vector(thrust::device_vector<long long int> &array, int start_id, int end_id, std::string array_name, std::string end, int method);
void make_dir(std::string dir_name);
void populate_ac_angles(float* ac_angles, int num_ac_angles);
void populate_ac_speeds(float* ac_speeds, int num_ac_speeds, float Fmax);
void populate_actions(float** H_actions, int num_ac_speeds, int num_ac_angles, float Fmax);
/*
----- Device functions -----
*/
__device__ int32_t get_thread_idx(){
// assigns idx to thread with which it accesses the flattened 3d vxrzns matrix
// for a given T and a given action.
// runs for both 2d and 3d grid
// TODO: may have to change this considering cache locality
// here i, j, k refer to a general matrix M[i][j][k]
int32_t i = threadIdx.x;
int32_t j = blockIdx.y;
int32_t k = blockIdx.x;
int32_t idx = k + (j*gridDim.x) + (i*gridDim.x*gridDim.y)+ blockIdx.z*blockDim.x*gridDim.x*gridDim.y;
return idx;
}
__device__ long long int state1D_from_spid(int32_t T, int32_t sp_id, long long int ncells){
// j ~ blockIdx.x
// i ~ blockIdx.y
// The above three constitute a spatial state index from i and j of grid
// last term is for including time index as well.
// return value when full spatial grid was used
// return (blockIdx.x + (blockIdx.y*gridDim.x) + (T*gridDim.x*gridDim.y) );
// return value for chunks concept
return sp_id + (T*ncells);
}
__device__ long long int state1D_from_ij(int32_t* posid, int32_t T, int32_t gsize){
// posid = {i , j}
// state id = j + i*dim(i) + T*dim(i)*dim(j)
// return value when full spatial grid was used
// return (posid[1] + posid[0]*gridDim.x + (T*gridDim.x*gridDim.y) ) ;
// return value for chunks concept
return (posid[1] + posid[0]*gsize + (T*gsize*gsize)*1LL ) ;
}
__device__ int32_t get_rzn_id(){
return (blockIdx.z * blockDim.x) + threadIdx.x;
}
__device__ void get_posids_from_sp_id(long long int sp_id, int gsize, int32_t* posids){
posids[0] = sp_id/gsize;
posids[1] = sp_id%gsize;
return;
}
__device__ long long int get_sp_id(){
// sp_id: 1d spatial id ranging from 0 to ncells
int i = blockIdx.y;
int j = blockIdx.x;
long long int sp_id = j + (i*gridDim.x)*1LL;
return sp_id;
}
__device__ void get_posids_relS2_0(int32_t m, int32_t* posids_S1, int32_t* posids_relS2_0){
// m*m is size of neighbour grid
// returns i,j index of upper left corner of neighbour grid
int32_t i1 = posids_S1[0];
int32_t j1 = posids_S1[1];
int32_t del = (m-1)/2;
posids_relS2_0[0] = i1 - del;
posids_relS2_0[1] = j1 - del;
return;
}
__device__ long long int get_rel_sp_id2(int32_t m, int32_t* posids_S2, int32_t* posids_relS2_0){
// returns relative sp_id for S2 in neighbour grid
int32_t del_i = posids_S2[0] - posids_relS2_0[0]; // i2 - rel_i0
int32_t del_j = posids_S2[1] - posids_relS2_0[1]; // j2 - rel_j0
long long int rel_sp_id2;
// if S2 outside neighbor grid, assign special value to rel_sp_id2
// this will help keep count of no. of S2s that lie outside neighb grid
// ideally there should be 0 such S2s
if (del_i < m && del_j < m)
rel_sp_id2 = del_j + (m*del_i);
else
rel_sp_id2 = m*m;
return rel_sp_id2;
}
__device__ long long int get_sp_id2_from_rel_sp_id2(int32_t m, int32_t gsize,
long long int rel_sp_id2, int32_t* posids_relS2_0){
// returns Sp_id2 from rel_sp_id2
long long int sp_id2;
int32_t del_i = rel_sp_id2/m;
int32_t del_j = rel_sp_id2%m;
int32_t i2 = del_i + posids_relS2_0[0];
int32_t j2 = del_j + posids_relS2_0[1];
sp_id2 = j2 + gsize*i2;
return sp_id2;
}
__device__ bool is_edge_state(int32_t i, int32_t j){
// n = gsize -1 that is the last index of the domain assuming square domain
int32_t n = gridDim.x - 1;
if (i == 0 || i == n || j == 0 || j == n )
return true;
else
return false;
}
__device__ bool is_in_obstacle(int sp_id, int T, long long int ncells, int* all_mask_mat){
//returns true if obstacle is present in state T,i,j
long long int mean_id = state1D_from_spid(T, sp_id, ncells);
return(all_mask_mat[mean_id] == 1 );
}
__device__ bool is_terminal(int32_t i, int32_t j, float* params){
// terminal state indices (of UL corner of terminal subgrid if term_subgrid_size>1)
int32_t i_term = params[8];
int32_t j_term = params[9];
int tsgsize = params[12]; //term_subgrid_size
if( (i >= i_term && i < i_term + tsgsize) && (j >= j_term && j < j_term + tsgsize) )
return true;
else return false;
}
__device__ bool my_isnan(int s){
// By IEEE 754 rule, NaN is not equal to NaN
return s != s;
}
__device__ void get_xypos_from_ij(int32_t i, int32_t j, int32_t gsize ,float* xs, float* ys, float* x, float* y){
*x = xs[j];
// *y = ys[gridDim.x - 1 - i];
*y = ys[gsize - 1 - i];
return;
}
__device__ long long int get_sp_id_from_posid(int32_t* posids, int32_t gsize){
// gives sp_id from posids (i,j)
return posids[1] + gsize*posids[0]*1LL ;
}
__device__ float get_angle_in_0_2pi(float theta){
float f_pi = 3.141592;
if (theta < 0)
return theta + (2*f_pi);
else
return theta;
}
/*
----- move() and reward_functions() moved to move_and_rewards.h/cu-----
*/
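// calculate_one_step_reward(): one-step reward selected by params[13] (reward_type):
//   0 (time)    : -dt
//   1 (energy1) : -(Cf*ac_speed^2 + Ct)*dt
//   2 (energy2) : ((Cr*(rad1+rad2)/2) - Cf*ac_speed^2 - Ct)*dt   (collection minus consumption)
//   3 (energy3) : ((Cr*(rad1+rad2)/2) - Ct)*dt                   (collection only)
//   4 (custom1) : alpha*(-ac_speed^2*dt) + (1-alpha)*(-dt)       (energy/time trade-off)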
__device__ float calculate_one_step_reward(float ac_speed, float ac_angle, float rad1, float rad2, float* params){
int method = params[13];
float alpha = params[19];
float Cr = 1; // coefficient for radiation term
float Cf = 1; // coefficient for energy consumption
float Ct = 0.01; // small coefficient on time to prevent a lazy start
float dt = params[4];
float energy_reward, time_reward;
if (method == 0) //time
return -dt;
else if (method == 1){ //energy1
return -(Cf*ac_speed*ac_speed + Ct)*dt;
}
else if (method == 2){ //energy2: maximise (collection-consumption)
return ((Cr*(rad2 + rad1)/2) - (Cf*ac_speed*ac_speed) - Ct)*dt;
}
else if (method == 3){
return ((Cr*(rad2 + rad1)/2)- Ct)*dt;
}
else if (method == 4){
energy_reward = -(ac_speed*ac_speed)*dt;
time_reward = -dt;
return alpha*energy_reward + (1-alpha)*time_reward;
}
else
return 0;
}
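// move(): one explicit Euler step of the agent kinematics for this realisation.
// Net velocity is thrust plus flow, (vnetx, vnety) = (F*cos(ac_angle) + vx, F*sin(ac_angle) + vy);
// the new position is clamped to the domain (adding r_outbound if it left), snapped to the nearest
// grid cell to overwrite posids with the (i,j) of S2, and terminal / late-timestep penalties are added to *r.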
__device__ void move(float ac_speed, float ac_angle, float vx, float vy, int32_t T, float* xs, float* ys, int32_t* posids, float* params, float* r ){
int32_t gsize = params[0];
int32_t n = params[0] - 1; // gsize - 1
// int32_t num_actions = params[1];
// int32_t nrzns = params[2];
// float F = params[3];
int32_t nt = params[10];
float F = ac_speed;
float dt = params[4];
float r_outbound = params[5];
float r_terminal = params[6];
// int32_t nT = params[10];
float Dj = fabsf(xs[1] - xs[0]);
float Di = fabsf(ys[1] - ys[0]);
int32_t i0 = posids[0];
int32_t j0 = posids[1];
float vnetx = F*cosf(ac_angle) + vx;
float vnety = F*sinf(ac_angle) + vy;
float x, y;
get_xypos_from_ij(i0, j0, gsize, xs, ys, &x, &y); // x, y stores centre coords of state i0,j0
float xnew = x + (vnetx * dt);
float ynew = y + (vnety * dt);
// float r_step = 0;
*r = 0; // initialise r with 0
if (xnew > xs[n])
{
xnew = xs[n];
*r += r_outbound;
}
else if (xnew < xs[0])
{
xnew = xs[0];
*r += r_outbound;
}
if (ynew > ys[n])
{
ynew = ys[n];
*r += r_outbound;
}
else if (ynew < ys[0])
{
ynew = ys[0];
*r += r_outbound;
}
int32_t xind, yind;
float remx = remainderf((xnew - xs[0]), Dj);
float remy = remainderf(-(ynew - ys[n]), Di);
xind = ((xnew - xs[0]) - remx)/Dj;
yind = (-(ynew - ys[n]) - remy)/Di;
if ((remx >= 0.5 * Dj) && (remy >= 0.5 * Di))
{
xind += 1;
yind += 1;
}
else if ((remx >= 0.5 * Dj && remy < 0.5 * Di))
{
xind += 1;
}
else if ((remx < 0.5 * Dj && remy >= 0.5 * Di))
{
yind += 1;
}
if (!(my_isnan(xind) || my_isnan(yind)))
{
// update posids
posids[0] = yind;
posids[1] = xind;
if (is_edge_state(posids[0], posids[1])) //line 110
{
*r += r_outbound;
}
}
if (is_terminal(posids[0], posids[1], params))
{
*r += r_terminal;
}
else{
// //reaching any state in the last timestep which is not terminal is penalised
// if (T == nt-2)
// *r += r_outbound;
//reaching any state in the last timestep which is not terminal is penalised
if (T > nt-20)
*r += 0.05*r_outbound;
}
}
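// extract_velocity(): reconstructs this realisation's velocity sample (vx, vy) at state (T, i, j)
// as the stored mean plus nmodes spatial modes weighted by per-realisation coefficients, i.e.
//   v(T,i,j,rzn) = v_mean(T,i,j) + sum_k Y_k(T,rzn) * v_k(T,i,j)
// with the mean in all_u_mat/all_v_mat, the modes in all_ui_mat/all_vi_mat and the coefficients in all_Yi.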
__device__ void extract_velocity(int32_t* posids, long long int sp_id, long long int ncells, float* vx, float* vy,
int32_t T, float* all_u_mat, float* all_v_mat, float* all_ui_mat,
float* all_vi_mat, float* all_Yi, float* params){
int32_t nrzns = params[2];
int32_t nmodes = params[7];
int32_t gsize = params[0];
long long int sp_uvi, str_uvi, sp_Yi; //startpoints and strides for accessing all_ui_mat, all_vi_mat and all_Yi
// int str_Yi;
float sum_x = 0;
float sum_y = 0;
float vx_mean, vy_mean;
//thread index. also used to access resultant vxrzns[nrzns, gsize, gsize]
int32_t idx = get_thread_idx();
//rzn index to identify which of the 5k rzn it is. used to access all_Yi.
int32_t rzn_id = get_rzn_id() ;
//mean_id is the index used to access the flattened all_u_mat[t,i,j].
long long int mean_id = state1D_from_spid(T, sp_id, ncells);
//to access all_ui_mat and all_vi_mat
//str_uvi = gridDim.x * gridDim.y;
// sp_uvi = (T * nmodes * str_uvi) + (gridDim.x * blockIdx.y) + (blockIdx.x);
str_uvi = gsize*gsize*1LL;
sp_uvi = (T * nmodes * str_uvi) + (gsize * posids[0]) + (posids[1]);
// to access all_Yi
sp_Yi = (T * nrzns * nmodes * 1LL) + (rzn_id * nmodes);
vx_mean = all_u_mat[mean_id];
for(int i = 0; i < nmodes; i++)
{
sum_x += all_ui_mat[sp_uvi + (i*str_uvi)]*all_Yi[sp_Yi + i];
}
vy_mean = all_v_mat[mean_id];
for(int i = 0; i < nmodes; i++)
{
sum_y += all_vi_mat[sp_uvi + (i*str_uvi)]*all_Yi[sp_Yi + i];
}
*vx = vx_mean + sum_x;
*vy = vy_mean + sum_y;
return;
}
__device__ void extract_radiation(long long int sp_id, int32_t T, long long int ncells,
float* all_s_mat, float* rad){
// for DETERMINISTIC radiation (scalar) field
// extract radiation (scalar) from scalar matrix
long long int mean_id = state1D_from_spid(T, sp_id, ncells);
*rad = all_s_mat[mean_id];
return;
}
__device__ bool is_within_band(int i, int j, int i1, int j1, int i2, int j2, float* xs, float* ys, int gsize){
//returns true if i,j are within the band connecting cells i1,j1 and i2,j2
if(i1==i2 || j1==j2){
return true;
}
else{
float x, y, x1, y1, x2, y2;
float cell_diag = fabsf(xs[1]-xs[0])*1.414213;
get_xypos_from_ij(i, j, gsize, xs, ys, &x, &y); // x, y stores centre coords of state i0,j0
get_xypos_from_ij(i1, j1, gsize, xs, ys, &x1, &y1);
get_xypos_from_ij(i2, j2, gsize, xs, ys, &x2, &y2);
float A = (y2-y1)/(x2-x1);
float B = -1;
float C = y1 - (A*x1);
float dist_btw_pt_line = fabsf(A*x + B*y + C)/sqrtf((A*A) + (B*B));
if (dist_btw_pt_line < cell_diag)
return true;
else
return false;
}
}
__device__ bool goes_through_obstacle(long long int sp_id1, long long int sp_id2, int T,
long long int ncells, int* D_all_mask_mat,
float* xs, float* ys, float* params){
// returns true if the transition involves going through obstacle
bool possible_collision = false;
int posid1[2];
int posid2[2];
int gsize = params[0];
long long int sp_id;
get_posids_from_sp_id(sp_id1, gsize, posid1);
get_posids_from_sp_id(sp_id2, gsize, posid2);
int imin = min(posid1[0], posid2[0]);
int imax = max(posid1[0], posid2[0]);
int jmin = min(posid1[1], posid2[1]);
int jmax = max(posid1[1], posid2[1]);
for(int i=imin; i<=imax; i++){
for(int j=jmin; j<=jmax; j++){
if(!(i==posid1[0]&&j==posid1[1])){
sp_id = j + gsize*i*1LL ;
if ( is_in_obstacle(sp_id, T, ncells, D_all_mask_mat) || is_in_obstacle(sp_id, T+1, ncells, D_all_mask_mat)){
if (is_within_band(i, j, posid1[0], posid1[1], posid2[0], posid2[1], xs, ys, gsize) == true){
possible_collision = true;
return true;
}
}
}
}
}
return possible_collision;
}
//test: changed from float* to float ac_angle
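// transition_calc(): one thread per (spatial cell, realisation) at timestep T for a single action
// (ac_speed, ac_angle). Each thread samples its realisation's velocity, takes one step from S1 with
// move(), maps the resulting S2 to its relative index in the m x m neighbour grid, and atomically
// accumulates a count in results[sp_id*Nb + rel_sp_id2] (normalised to a probability later) and the
// immediate reward in sumR_sa[sp_id] (divided by nrzns later in compute_mean).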
__global__ void transition_calc(float* T_arr, long long int ncells,
float* all_u_mat, float* all_v_mat, float* all_ui_mat, float* all_vi_mat, float* all_Yi,
float* D_all_s_mat, int* D_all_mask_mat,
float ac_speed, float ac_angle, float* xs, float* ys, float* params, float* sumR_sa,
float* results){
// results directions - 1: along S2; 2: along S1; 3: along columns towards count
int32_t gsize = params[0]; // size of grid along 1 direction. ASSUMING square grid.
int32_t nrzns = params[2];
float r_outbound = params[5];
// int32_t is_stationary = params[11];
int32_t T = (int32_t)T_arr[0]; // current timestep
int32_t idx = get_thread_idx();
long long int res_idx;
float vx, vy, rad1, rad2;
long long int sp_id = get_sp_id(); //sp_id is space_id. S1%(gsize*gsize)
long long int sp_id2;
long long int rel_sp_id2;
int32_t posids_relS2_0[2];
int32_t posids_S1[2];
int32_t m = (int32_t) params[18];
int32_t Nb = (m*m) + 1;
float one = 1.0;
if(idx < gridDim.x*gridDim.y*nrzns && sp_id < ncells) //or idx < arr_size
{
// int32_t posids[2] = {(int32_t)blockIdx.y, (int32_t)blockIdx.x}; //static declaration of array of size 2 to hold i and j values of S1.
int32_t posids[2]; //static declaration of array of size 2 to hold i and j values of S1.
get_posids_from_sp_id(sp_id, gsize, posids);
get_posids_from_sp_id(sp_id, gsize, posids_S1);
int32_t rzn_id = get_rzn_id();
// After move() these will be overwritten by i and j values of S2
float r=0; // to store immediate reward
float r_step;
extract_velocity(posids, sp_id, ncells, &vx, &vy, T, all_u_mat, all_v_mat, all_ui_mat, all_vi_mat, all_Yi, params);
extract_radiation(sp_id, T, ncells, D_all_s_mat, &rad1);
// if s1 not terminal
if (is_terminal(posids[0], posids[1], params) == false){
// if s1 not in obstacle
if (is_in_obstacle(sp_id, T, ncells, D_all_mask_mat) == false){
// moves agent and adds r_outbound and r_terminal to r
move(ac_speed, ac_angle, vx, vy, T, xs, ys, posids, params, &r);
sp_id2 = get_sp_id_from_posid(posids, gsize);
extract_radiation(sp_id2, T+1, ncells, D_all_s_mat, &rad2);
// adds one step-reward based on method. the method id is available in params
r_step = calculate_one_step_reward(ac_speed, ac_angle, rad1, rad2, params);
r += r_step;
// if S2 is an obstacle cell. then penalise with r_outbound
// if (is_in_obstacle(sp_id2, T+1, ncells, D_all_mask_mat) == true )
// r = r_outbound;
if (goes_through_obstacle(sp_id, sp_id2, T, ncells, D_all_mask_mat, xs, ys, params) == true)
r = r_outbound;
}
// if s1 is in obstacle, then no update to posid
else
r = r_outbound;
}
get_posids_relS2_0(m, posids_S1, posids_relS2_0);
rel_sp_id2 = get_rel_sp_id2(m, posids, posids_relS2_0);
res_idx = sp_id*Nb + rel_sp_id2;
float b = atomicAdd(&results[res_idx], one);
//writing to sumR_sa. this array will later be divided by nrzns, to get the avg
float a = atomicAdd(&sumR_sa[sp_id], r);
__syncthreads();
}//if ends
return;
}
__global__ void compute_mean(float* D_master_sumRsa_arr, int size, int nrzns) {
// computes mean
int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
if (tid < size)
D_master_sumRsa_arr[tid] = D_master_sumRsa_arr[tid]/nrzns;
return;
}
__global__ void count_kernel(float* D_master_S2_arr_ip, int nrzns, unsigned long long int* num_uq_s2_ptr) {
// D_master_S2_arr_ip contains count of relS2s for S1s for a given action
// This kernel counts no. of nnz elements for a given S1
// This is needed for getting total nnz to initialise COO matrix
// ncells is gridDim, i.e. we have ncells blocks in grid
// Nb is blockDim, i.e we have Nb threads in block
int ncells = gridDim.x; // one block per spatial cell, so gridDim.x == ncells
int Nb = blockDim.x;
long long int tid = (blockIdx.x*Nb) + threadIdx.x;
int idx = blockIdx.x;
float nnz;
unsigned long long int one = 1;
if ((tid < ncells*Nb) && (threadIdx.x != Nb-1)){ // tid < Nb*ncells
if (D_master_S2_arr_ip[tid] != 0){
nnz = atomicAdd(&num_uq_s2_ptr[idx], one);
}
}
return;
}
__global__ void reduce_kernel(float* D_master_S2_arr_ip, int t, int Nb, int m,
long long int ncells, int nrzns, int gsize,
long long int* D_coo_s1_arr, long long int* D_coo_s2_arr,
float* D_coo_cnt_arr, unsigned long long int* num_uq_s2_ptr, unsigned long long int* prSum_num_uq_s2_ptr){
long long int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
long long int start_idx = tid*Nb; // to access tid'th threads 0-pos in ip_arr
long long int n_uqs = num_uq_s2_ptr[tid]; //number of unique S2s for tid'th block
long long int op_st_id = prSum_num_uq_s2_ptr[tid]; //sum of number of unique S2s up to tid'th block. to access tid'th thread's 0-pos in op_arr
long long int ith_nuq = 0; //ranges from 0 to n_uqs , to index number between 0 and n_uqs
long long int rel_sp_id2;
long long int sp_id2;
long long int S2;
long long int sp_id1 = tid;
float count; // no. of realisations that landed on a given relative S2 (read from the master S2 count array)
int32_t posids_relS2_0[2];
int32_t posids_S1[2];
if (tid < ncells){
// int32_t s1 = (tid%ncells) + (t*ncells); // TODO:xxdone change this to nbe a function of a arguments: sp_id and t
long long int s1 = tid + (t*ncells);
for(long long int i = 0; i< n_uqs; i++)
D_coo_s1_arr[op_st_id + i] = s1;
get_posids_relS2_0(m, posids_S1, posids_relS2_0);
for(long long int i = 0; i< Nb-1; i++){
count = D_master_S2_arr_ip[start_idx + i];
if (count != 0){
rel_sp_id2 = i;
get_posids_from_sp_id(sp_id1, gsize, posids_S1);
get_posids_relS2_0(m, posids_S1, posids_relS2_0);
sp_id2 = get_sp_id2_from_rel_sp_id2(m, gsize,
rel_sp_id2, posids_relS2_0);
S2 = state1D_from_spid(t+1, sp_id2, ncells);
D_coo_s2_arr[op_st_id + ith_nuq] = S2; // store old_s2 value in the [.. + ith] position
D_coo_cnt_arr[op_st_id + ith_nuq] = count/nrzns; // store prob value in the [.. + ith] position
ith_nuq++; // increment i
}
}
}
return;
}
template<typename dType>
void print_array(dType* array, int num_elems,std::string array_name, std::string end){
std::cout << array_name << std::endl;
for(int i = 0; i < num_elems; i++)
std::cout << array[i] << " " << end;
std::cout << std::endl;
}
std::string get_prob_name(int num_ac_speeds, int num_ac_angles, int i_term, int j_term,
int tsg_size){
std::string s_n_ac_sp = std::to_string(num_ac_speeds);
std::string s_n_ac_ac = std::to_string(num_ac_angles);
std::string s_i = std::to_string(i_term);
std::string s_j = std::to_string(j_term);
std::string s_tsg = std::to_string(tsg_size);
std::string name = "a" + s_n_ac_sp + "x" + s_n_ac_ac + "_"
+ "i" + s_i + "_" "j" + s_j + "_"
+ "ref" + s_tsg;
return name;
}
void build_sparse_transition_model_at_T_at_a(int t, int action_id, int bDimx, thrust::device_vector<float> &D_tdummy,
float* D_all_u_arr, float* D_all_v_arr, float* D_all_ui_arr,
float* D_all_vi_arr, float* D_all_yi_arr,
float* D_all_s_arr, int* D_all_mask_arr,
thrust::device_vector<float> &D_params,
thrust::host_vector<float> &H_params,
thrust::device_vector<float> &D_xs,
thrust::device_vector<float> &D_ys,
float** H_actions,
thrust::host_vector<int32_t> &H_coo_len_per_ac,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs
);
void build_sparse_transition_model_at_T_at_a(int t, int action_id, int bDimx, thrust::device_vector<float> &D_tdummy,
float* D_all_u_arr, float* D_all_v_arr, float* D_all_ui_arr,
float* D_all_vi_arr, float* D_all_yi_arr,
float* D_all_s_arr, int* D_all_mask_arr,
thrust::device_vector<float> &D_params,
thrust::host_vector<float> &H_params,
thrust::device_vector<float> &D_xs,
thrust::device_vector<float> &D_ys,
float** H_actions,
thrust::host_vector<int32_t> &H_coo_len_per_ac,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs
){
int gsize = (int) H_params[0];
int num_actions = (int)H_params[1];
int nrzns = (int) H_params[2];
int nt = (int) H_params[10];
int m = (int)H_params[18];
int Nb = (m*m) + 1; //+1 is to store no. of S2s not lying in neighbour_array. Ideally it should have 0
// raw pointer casts
float* D_T_arr = thrust::raw_pointer_cast(&D_tdummy[0]);
float* xs = thrust::raw_pointer_cast(&D_xs[0]);
float* ys = thrust::raw_pointer_cast(&D_ys[0]);
float* params = thrust::raw_pointer_cast(&D_params[0]);
//Define Kernel launch parameters for transition calculation kernel
int DimGrid_z = (nrzns/bDimx)+1;
if (nrzns % bDimx == 0)
DimGrid_z = (nrzns/bDimx);
// initialise master S2 array
thrust::device_vector<float> D_master_S2_vector(ncells * Nb, 0);
float* D_master_S2_arr = thrust::raw_pointer_cast(&D_master_S2_vector[0]);
// initialise master sum_Rsa array - sumRsa's
// Important to initialise it with 0
thrust::device_vector<float> D_master_sumRsa_vector(ncells, 0);
float* D_master_sumRsa_arr = thrust::raw_pointer_cast(&D_master_sumRsa_vector[0]);
// define kernel block and grid configuration
dim3 DimGrid(gsize, gsize, DimGrid_z);
dim3 DimBlock(bDimx, 1, 1);
float ac_speed = H_actions[action_id][0];
float ac_angle = H_actions[action_id][1];
// launch kernel for @a @t
transition_calc<<< DimGrid, DimBlock >>> (D_T_arr,
ncells, D_all_u_arr, D_all_v_arr, D_all_ui_arr, D_all_vi_arr, D_all_yi_arr,
D_all_s_arr, D_all_mask_arr,
ac_speed, ac_angle, xs, ys, params, D_master_sumRsa_arr,
D_master_S2_arr);
cudaDeviceSynchronize();
// checks
// std::cout << "D_xs= " << std::endl;
// for (int i = 0; i< 10; i++)
// std::cout << D_xs[i] << " " ;
// if (t == nt-2){
// std::cout << "t = " << t << "\n nt = " << nt << "\n" ;
// std::cout<<"gisze= " << gsize << std::endl;
// std::cout<<"DimGrid_z = " << DimGrid_z << std::endl;
// std::cout<<"bDimx = " << bDimx << std::endl;
// }
// // CHECK copy data back to host for check
// std::cout << "a" << n <<"\n vx at s1=0: " << D_params[31] << std::endl;
// std::cout <<"\n vx at s1=0: " << D_params[30] << std::endl;
// std::cout << "----a" << n <<"\n";
// std::cout <<"pre move " << "\n";
// std::cout<<"r1\n"<< D_params[23] << "," << D_params[24] << std::endl;
// std::cout<<"r2\n"<< D_params[25] << "," << D_params[26] << std::endl;
// std::cout <<"post move " << "\n";
// std::cout<<"r1\n"<< D_params[27] << "," << D_params[28] << std::endl;
// std::cout<<"r2\n"<< D_params[29] << "," << D_params[30] << std::endl;
// thrust::copy(D_master_S2_vector.begin() + n*arr_size, D_master_S2_vector.begin() + (n+1)*arr_size, H_S2_vec.begin());
// thrust::copy(D_master_sumRsa_vector.begin() + n*ncells, D_master_sumRsa_vector.begin() + (n+1)*ncells, H_sumR_sa.begin());
// std::cout << "post kernel" << std::endl;
// for(int i = 0; i < 10; i ++)
// std::cout << H_sumR_sa[i] << std::endl;
// for(int i = 0; i < 10; i ++)
// std::cout << H_S2_vec[i] << std::endl;
int Nthreads = D_master_sumRsa_vector.size();
assert(Nthreads == ncells);
int threads_per_block = 64;
int blocks_per_grid = (Nthreads/threads_per_block) + 1;
assert( blocks_per_grid * threads_per_block >= Nthreads);
compute_mean<<< blocks_per_grid, threads_per_block >>>(D_master_sumRsa_arr, Nthreads, nrzns);
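// average the accumulated R(s,a) sums over the nrzns realizations (one thread per cell)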
// TODO: in optimisation phase move this line after initialisation of num_uq_S2 vectors.
// cudaDeviceSynchronize();
//initialising vectors for counting nnzs, i.e. the number of unique S2s per S1
thrust::device_vector<unsigned long long int> D_num_uq_s2(ncells,0);
thrust::device_vector<unsigned long long int> D_prSum_num_uq_s2(ncells);
unsigned long long int* num_uq_s2_ptr = thrust::raw_pointer_cast(&D_num_uq_s2[0]);
unsigned long long int* prSum_num_uq_s2_ptr = thrust::raw_pointer_cast(&D_prSum_num_uq_s2[0]);
//one thread per element
// count no. of unique S2s for each S1 and fill in num_uq_s2
count_kernel<<<ncells, Nb>>>(D_master_S2_arr, nrzns, num_uq_s2_ptr);
cudaDeviceSynchronize();
//CHECKs
// std::cout << "D_num_uq_s2_pc\n";
// int tempflag = 0;
// int tempnum;
// int cnt2 = 0;
// int cnt1 = 0;
// for (int i =0; i < efCszNa; i++){
// tempnum = D_num_uq_s2_pc[i];
// if (tempnum == 1)
// cnt1++;
// else if (tempnum == 2)
// cnt2++;
// else
// std::cout << " --------------------------- WRONG-----------\n";
// }
// std::cout << "cnt1 = " << cnt1 << "\ncnt2 = " << cnt2 <<"\n";
// calc nnz: number of non-zero elements (or unique S2s) for a given S1 and action
long long int nnz = thrust::reduce(D_num_uq_s2.begin(), D_num_uq_s2.end(), (long long int) 0, thrust::plus<long long int>());
// get prefix sum of D_num_uq_s2. This helps threads to access apt COO indices in reduce_kernel
thrust::exclusive_scan(D_num_uq_s2.begin(), D_num_uq_s2.end(), D_prSum_num_uq_s2.begin());
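// e.g. D_num_uq_s2 = {2, 1, 3, ...} scans to D_prSum_num_uq_s2 = {0, 2, 3, ...}; entry k gives the COO offset at which S1 = k starts writing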
// std::cout << "nnz = " << nnz<< "\n";
//initialise coo arrays (concatenated across actions)
thrust::device_vector<long long int> D_coo_s1(nnz);
thrust::device_vector<long long int> D_coo_s2(nnz);
thrust::device_vector<float> D_coo_count(nnz); // TODO: make this int32_t and introduce another array for prob
long long int* D_coo_s1_arr = thrust::raw_pointer_cast(&D_coo_s1[0]);
long long int* D_coo_s2_arr = thrust::raw_pointer_cast(&D_coo_s2[0]);
float* D_coo_cnt_arr = thrust::raw_pointer_cast(&D_coo_count[0]);
Nthreads = ncells;
assert(Nthreads == ncells);
threads_per_block = 64;
blocks_per_grid = (Nthreads/threads_per_block) + 1;
// reduce operation to fill COO arrays
reduce_kernel<<<blocks_per_grid, threads_per_block>>>(D_master_S2_arr, t, Nb, m,
ncells, nrzns, gsize, D_coo_s1_arr, D_coo_s2_arr, D_coo_cnt_arr,
num_uq_s2_ptr, prSum_num_uq_s2_ptr);
cudaDeviceSynchronize();
// nnz should be filled in a global array
H_coo_len_per_ac[action_id] = nnz;
// Copy device COO results to host COO vectors across actions and append vectors across time
assert(action_id >=0);
H_Aarr_of_cooS1[action_id].insert(H_Aarr_of_cooS1[action_id].end(), D_coo_s1.begin(), D_coo_s1.end());
H_Aarr_of_cooS2[action_id].insert(H_Aarr_of_cooS2[action_id].end(), D_coo_s2.begin(), D_coo_s2.end());
H_Aarr_of_cooProb[action_id].insert(H_Aarr_of_cooProb[action_id].end(), D_coo_count.begin(), D_coo_count.end());
H_Aarr_of_Rs[action_id].insert(H_Aarr_of_Rs[action_id].end(), D_master_sumRsa_vector.begin(), D_master_sumRsa_vector.end());
//checks
// print_device_vector(D_coo_s1, 0, 10, "D_coo_s1", " ", 0);
// print_device_vector(D_coo_s2, 0, 10, "D_coo_s2", " ", 0);
// std::cout << "H_coo_len_per_ac" << std::endl;
// for (int n = 0; n < num_actions; n++)
// std::cout << H_coo_len_per_ac[n] << std::endl;
// std::cout << "H_Aarr_of_cooS1" << std::endl;
// for (int n = 0; n < num_actions; n++){
// for (int i = 0; i < H_Aarr_of_cooS1[n].size(); i++)
// std::cout << H_Aarr_of_cooS1[n][i] << " , " << H_Aarr_of_cooS2[n][i] << " , " << H_Aarr_of_cooProb[n][i] << std::endl;
// std::cout << std::endl;
// }
// std::cout << "H_Aarr_of_Rs" << std::endl;
// for (int n = 0; n < num_actions; n++){
// for (int i = 0; i < ncells; i++)
// std::cout << H_Aarr_of_Rs[n][i] << std::endl;
// std::cout << std::endl;
// }
// // array of num_actions decive_vvectors for sum_Rsa_vec
// // initialasation with 0 is important. because values are added to this
// thrust::host_vector<float> H_arr_sumR_sa[num_actions];
// for(int n = 0; n < num_actions; n++){
// H_arr_sumR_sa[n] = thrust::host_vector<float>(nnz[i]);
}
int get_reward_type(std::string prob_type){
// returns
// 0 for time
// 1 for energy1
// 2 for energy2
// 3 for energy3
// 4 for custom1
if (prob_type == "time")
return 0;
else if (prob_type == "energy1")
return 1;
else if (prob_type == "energy2")
return 2;
else if (prob_type == "energy3")
return 3;
else if (prob_type == "custom1")
return 4;
else
return -1;
}
// ------------------------------- MAIN () ---------------------------------
int main(int argc, char *argv[]){
// add input argument part
// -------------------- input data starts here ---------------------------------
#include "input_to_build_model.h"
float alpha = alpha_header;
if(argc>1){
std::cout << alpha << " and " << argv[0] << " and " << argv[1] << "\n";
alpha = std::stof(argv[1]);
alpha = alpha/100;
// std::stringstream convert{ argv[2] };
// if(!(convert >> alpha)) alpha = 0;
std::cout << alpha << "\n";
}
std::cout << argc << "\n";
if(argc>1) // guard: argv[1] only exists when an argument was supplied
std::cout << alpha << " and " << argv[0] << " and " << argv[1] << "\n";
int reward_type = get_reward_type(prob_type);
std::cout << "Reward type: " << reward_type << "\n";
// define full problem name and print them to a temporary file
// the temp file will be read by python scripts for conversion
std::string prob_specs = get_prob_name(num_ac_speeds, num_ac_angles, i_term,
j_term, term_subgrid_size);
std::string op_Fname_upto_prob_name = "data_modelOutput/" + prob_type + "/"
+ prob_name + "/" ;
std::string op_FnamePfx = op_Fname_upto_prob_name + prob_specs + "/"; //path for storing op npy data.
std::string op_Fname_withAlpha = op_FnamePfx + std::to_string(alpha) +"/";
make_dir(op_Fname_upto_prob_name);
make_dir(op_FnamePfx);
if(reward_type>3){
op_FnamePfx = op_Fname_withAlpha;
make_dir(op_FnamePfx);
}
std::ofstream fout("temp_modelOp_dirName.txt");
fout << prob_type << "\n";
fout << prob_name << "\n";
fout << prob_specs << "\n";
fout << std::to_string(alpha) << "\n";
fout << op_FnamePfx;
fout.close();
// TODO: Make sure files are stored in np.float32 format
std::string data_path = "data_input/" + prob_name + "/";
std::string all_u_fname = data_path + "all_u_mat.npy";
std::string all_v_fname = data_path + "all_v_mat.npy";
std::string all_ui_fname = data_path + "all_ui_mat.npy";
std::string all_vi_fname = data_path + "all_vi_mat.npy";
std::string all_yi_fname = data_path + "all_Yi.npy";
std::string all_s_fname = data_path + "all_s_mat.npy";
std::string all_mask_fname = data_path + "obstacle_mask.npy"; //this file stored in int32
// -------------------- input data ends here ---------------------------------
// make directory for storing output data from this file
make_dir(op_Fname_upto_prob_name);
make_dir(op_FnamePfx);
int all_u_n_elms;
int all_v_n_elms;
int all_ui_n_elms;
int all_vi_n_elms;
int all_yi_n_elms;
int all_s_n_elms;
int all_mask_n_elms;
cnpy::NpyArray all_u_cnpy = read_velocity_field_data(all_u_fname, &all_u_n_elms);
cnpy::NpyArray all_v_cnpy = read_velocity_field_data(all_v_fname, &all_v_n_elms);
cnpy::NpyArray all_ui_cnpy = read_velocity_field_data(all_ui_fname, &all_ui_n_elms);
cnpy::NpyArray all_vi_cnpy = read_velocity_field_data(all_vi_fname, &all_vi_n_elms);
cnpy::NpyArray all_yi_cnpy = read_velocity_field_data(all_yi_fname, &all_yi_n_elms);
cnpy::NpyArray all_s_cnpy = read_velocity_field_data(all_s_fname, &all_s_n_elms);
cnpy::NpyArray all_mask_cnpy = read_velocity_field_data(all_mask_fname, &all_mask_n_elms);
float* all_u_mat = all_u_cnpy.data<float>();
float* all_v_mat = all_v_cnpy.data<float>();
float* all_ui_mat = all_ui_cnpy.data<float>();
float* all_vi_mat = all_vi_cnpy.data<float>();
float* all_yi_mat = all_yi_cnpy.data<float>();
float* all_s_mat = all_s_cnpy.data<float>();
int* all_mask_mat = all_mask_cnpy.data<int>();
// CHECKS:
// print_array<float>(all_u_mat, all_u_n_elms, "all_u_mat", " ");
// print_array<float>(all_ui_mat, all_ui_n_elms,"all_ui_mat", " ");
// print_array<float>(all_yi_mat, all_yi_n_elms,"all_yi_mat", " ");
std::cout << "Finished reading Velocity Field Data !" << std::endl;
assert(neighb_gsize <= gsize);
assert((int)neighb_gsize%2 == 1); // neighb_gsize must be odd
//TODO: fill params in a function
// Contains implicit casting from int32_t to float
thrust::host_vector<float> H_params(32);
H_params[0] = gsize;
H_params[1] = num_actions;
H_params[2] = nrzns;
H_params[3] = F;
H_params[4] = dt;
H_params[5] = r_outbound;
H_params[6] = r_terminal;
H_params[7] = nmodes;
H_params[8] = i_term;
H_params[9] = j_term;
H_params[10] = nt;
H_params[11] = is_stationary;
H_params[12] = term_subgrid_size;
H_params[13] = reward_type;
H_params[14] = num_ac_speeds;
H_params[15] = num_ac_angles;
H_params[16] = dx;
H_params[17] = dy;
H_params[18] = neighb_gsize; // referred to as m in functions
H_params[19] = alpha;
for( int i =20; i<32; i++)
H_params[i] = z;
// Define grid ticks in host
thrust::host_vector<float> H_xs(gsize, -1);
thrust::host_vector<float> H_ys(gsize, -1);
float* xs = thrust::raw_pointer_cast(&H_xs[0]);
float* ys = thrust::raw_pointer_cast(&H_ys[0]);
define_xs_or_ys(xs, dx, x0, gsize);
define_xs_or_ys(ys, dy, y0, gsize);
// define actions (speed, angle pairs) in host
float** H_actions = new float*[num_actions];
for(int i=0; i<num_actions; i++)
H_actions[i] = new float[2];
populate_actions(H_actions, num_ac_speeds, num_ac_angles, F);
// std::cout << "CHECK: ACTIONS: \n";
// for(int i=0; i<num_actions; i++){
// std::cout << H_actions[i][0] << ", " << H_actions[i][1] << "\n";
// }
//----- start copying data to device --------
// Copy vel field data to device memory using thrust device_vector
thrust::device_vector<float> D_all_u_vec (all_u_mat, all_u_mat + all_u_n_elms);
thrust::device_vector<float> D_all_v_vec (all_v_mat, all_v_mat + all_v_n_elms);
thrust::device_vector<float> D_all_ui_vec (all_ui_mat, all_ui_mat + all_ui_n_elms);
thrust::device_vector<float> D_all_vi_vec (all_vi_mat, all_vi_mat + all_vi_n_elms);
thrust::device_vector<float> D_all_yi_vec (all_yi_mat, all_yi_mat + all_yi_n_elms);
thrust::device_vector<float> D_all_s_vec (all_s_mat, all_s_mat + all_s_n_elms);
thrust::device_vector<int> D_all_mask_vec (all_mask_mat, all_mask_mat + all_mask_n_elms);
float* D_all_u_arr = thrust::raw_pointer_cast(&D_all_u_vec[0]);
float* D_all_v_arr = thrust::raw_pointer_cast(&D_all_v_vec[0]);
float* D_all_ui_arr = thrust::raw_pointer_cast(&D_all_ui_vec[0]);
float* D_all_vi_arr = thrust::raw_pointer_cast(&D_all_vi_vec[0]);
float* D_all_yi_arr = thrust::raw_pointer_cast(&D_all_yi_vec[0]);
float* D_all_s_arr = thrust::raw_pointer_cast(&D_all_s_vec[0]);
int* D_all_mask_arr = thrust::raw_pointer_cast(&D_all_mask_vec[0]);
std::cout << "Copied to Device : Velocity Field Data !" << std::endl;
thrust::device_vector<float> D_tdummy(2,0);
// initialise empty device vectors. These contain time-invariant data
thrust::device_vector<float> D_params(32);
thrust::device_vector<float> D_xs(gsize);
thrust::device_vector<float> D_ys(gsize);
// initialise reusable host vectors
thrust::host_vector<int32_t> H_coo_len_per_ac(num_actions);
thrust::host_vector<long long int> H_Aarr_of_cooS1[(int)num_actions];
thrust::host_vector<long long int> H_Aarr_of_cooS2[(int)num_actions];
thrust::host_vector<float> H_Aarr_of_cooProb[(int)num_actions];
thrust::host_vector<float> H_Aarr_of_Rs[(int)num_actions];
//initialised with 0 size. later data from device is inserted/appended to the end of vector
for (int i =0; i < num_actions; i++){
H_Aarr_of_cooS1[i] = thrust::host_vector<long long int> (0);
H_Aarr_of_cooS2[i] = thrust::host_vector<long long int> (0);
H_Aarr_of_cooProb[i] = thrust::host_vector<float> (0);
H_Aarr_of_Rs[i] = thrust::host_vector<float> (0);
}
// assign value to global variable
ncells = gsize*gsize;
// copy data from host to device
D_params = H_params;
D_xs = H_xs;
D_ys = H_ys;
// run time loop and compute transition data for each time step
auto start = high_resolution_clock::now();
auto end = high_resolution_clock::now();
auto duration_t = duration_cast<microseconds>(end - start);
//IMP: Run time loop till nt-1. There are no S1-to-S2 transitions at the last timestep
for(int t = 0; t < nt-1; t++){
std::cout << "*** Computing data for timestep, T = " << t << std::endl;
D_tdummy[0] = t;
start = high_resolution_clock::now();
for(int action_id = 0; action_id < num_actions; action_id++){
// std::cout << " * action_id= " << action_id;
// this function also concats coos across time.
build_sparse_transition_model_at_T_at_a(t, action_id, bDimx, D_tdummy, D_all_u_arr, D_all_v_arr,
D_all_ui_arr, D_all_vi_arr, D_all_yi_arr,
D_all_s_arr, D_all_mask_arr,
D_params, H_params, D_xs, D_ys, H_actions,
H_coo_len_per_ac,
H_Aarr_of_cooS1, H_Aarr_of_cooS2, H_Aarr_of_cooProb,
H_Aarr_of_Rs);
// output_data )
}
end = high_resolution_clock::now();
std::cout << std::endl ;
duration_t = duration_cast<microseconds>(end - start);
std::cout << "duration@t = "<< duration_t.count()/1e6 << "sec" << std::endl;
std::cout << 3*H_Aarr_of_cooS1[0].size()*4*1e-6 << " MB" << std::endl;
std::cout << std::endl << std::endl;
}
// append rewards for the last time step to the R vector of each action (zeros here; see TODO below).
// this has to be done separately because the above loop runs till nt-1.
/*
TODO: 1. Verify rewards as last time step
*/
thrust::host_vector<float> H_rewards_at_end_t(ncells, 0);
for (int i =0; i < num_actions; i++){
H_Aarr_of_Rs[i].insert(H_Aarr_of_Rs[i].end(), H_rewards_at_end_t.begin(), H_rewards_at_end_t.end());
}
//Check
// for (int i =0; i < num_actions; i++)
// std::cout << H_Aarr_of_Rs[i].size() << " ";
// find nnz per action
thrust::host_vector<long long int> H_master_PrSum_nnz_per_ac(num_actions);
long long int DP_relv_params[2] = {ncells*nt, num_actions*1LL};
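// DP_relv_params = {ncells*nt (total space-time states), num_actions}; written out below as DP_relv_params.npy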
long long int master_nnz = 0; //running sum of nnz going across actions
// calculate inclusive prefix sum of nnz's across actions
// will be used to access indices while concatenating results across actions
for(int i = 0; i < num_actions; i++){
master_nnz += H_Aarr_of_cooS1[i].size();
H_master_PrSum_nnz_per_ac[i] = master_nnz;
}
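// e.g. per-action nnz counts {10, 12, 9} give H_master_PrSum_nnz_per_ac = {10, 22, 31} and master_nnz = 31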
unsigned long int num_DP_params = sizeof(DP_relv_params) / sizeof(DP_relv_params[0]);
// print_array<long long int>(DP_relv_params, 2, "DP_relv_params", " ");
// std::cout << "chek num = " << sizeof(DP_relv_params) << std::endl;
// std::cout << "chek denom = " << sizeof(DP_relv_params[0]) << std::endl;
//checks
// std::cout << "total/master_nnz = " << master_nnz << std::endl;
// std::cout << "H_Aarr_of_cooS1[i].size()" << std::endl;
// for(int i = 0; i < num_actions; i++)
// std::cout << H_Aarr_of_cooS1[i].size() << std::endl;
// print_array<long long int>(&H_Aarr_of_cooS2[0][0], 10, "H_Aarr_of_cooS2[0]", " ");
// save final coo data
thrust::host_vector<long long int> H_master_cooS1(master_nnz);
thrust::host_vector<long long int> H_master_cooS2(master_nnz);
thrust::host_vector<float> H_master_cooVal(master_nnz);
thrust::host_vector<float> H_master_R(ncells*nt*num_actions, -99999); //TODO: verify -99999
std::string op_FnamePfx_2 = "data_solverOutput/" + prob_type + "/"
+ prob_name + "/" + prob_specs + "/";
std::string op_Fname_withAlpha_2 = "data_solverOutput/" + prob_type + "/"
+ prob_name + "/" + prob_specs + std::to_string(alpha) +"/";
if(reward_type>3){
op_FnamePfx_2 = op_Fname_withAlpha_2;
}
save_master_Coos_to_file(op_FnamePfx,op_FnamePfx_2, num_actions,
H_master_cooS1,
H_master_cooS2,
H_master_cooVal,
H_master_R,
H_Aarr_of_cooS1,
H_Aarr_of_cooS2,
H_Aarr_of_cooProb,
H_Aarr_of_Rs,
H_params,
DP_relv_params,
num_DP_params);
return 0;
}
//------------ main ends here ------------------------------------------
void save_master_Coos_to_file(std::string op_FnamePfx, std::string op_FnamePfx_2, int num_actions,
thrust::host_vector<long long int> &H_master_cooS1,
thrust::host_vector<long long int> &H_master_cooS2,
thrust::host_vector<float> &H_master_cooVal,
thrust::host_vector<float> &H_master_R,
thrust::host_vector<long long int>* H_Aarr_of_cooS1,
thrust::host_vector<long long int>* H_Aarr_of_cooS2,
thrust::host_vector<float>* H_Aarr_of_cooProb,
thrust::host_vector<float>* H_Aarr_of_Rs,
thrust::host_vector<float> &prob_params,
long long int* DP_relv_params,
unsigned long int num_DP_params){
// copies COO data from each per-action vector into the master vectors
// master_coo vectors are a concatenation first across time, then across actions
// ALSO, MODIFIES S1(t,i,j) to S1(t,i,j,a)
unsigned long long int master_nnz = H_master_cooS1.size();
unsigned long long int prob_params_size = prob_params.size();
long long int m_idx = 0;
long long int n_states = DP_relv_params[0];
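// each action's S1 indices are offset by i*n_states below, so action i occupies rows [i*n_states, (i+1)*n_states) of the flattened state space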
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_cooS1[i].size(); j++){
// TODO: modify to include actions
H_master_cooS1[m_idx] = H_Aarr_of_cooS1[i][j] + i*n_states;
m_idx++;
}
}
m_idx = 0;
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_cooS2[i].size(); j++){
H_master_cooS2[m_idx] = H_Aarr_of_cooS2[i][j];
m_idx++;
}
}
m_idx = 0;
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_cooProb[i].size(); j++){
H_master_cooVal[m_idx] = H_Aarr_of_cooProb[i][j];
m_idx++;
}
}
m_idx = 0;
for(int i = 0; i < num_actions; i++){
for(int j = 0; j< H_Aarr_of_Rs[i].size(); j++){
H_master_R[m_idx] = H_Aarr_of_Rs[i][j];
m_idx++;
}
}
std::cout << "check num_DP_params = " << num_DP_params << std::endl;
std::cout << "op_FnamePfx= " << op_FnamePfx << "\n";
cnpy::npy_save(op_FnamePfx + "master_cooS1.npy", &H_master_cooS1[0], {master_nnz,1},"w");
cnpy::npy_save(op_FnamePfx + "master_cooS2.npy", &H_master_cooS2[0], {master_nnz,1},"w");
cnpy::npy_save(op_FnamePfx + "master_cooVal.npy", &H_master_cooVal[0], {master_nnz,1},"w");
cnpy::npy_save(op_FnamePfx + "master_R.npy", &H_master_R[0], {H_master_R.size(),1},"w");
cnpy::npy_save(op_FnamePfx + "DP_relv_params.npy", &DP_relv_params[0], {num_DP_params,1},"w");
cnpy::npy_save(op_FnamePfx + "prob_params.npy", &prob_params[0], {prob_params_size,1},"w");
// cnpy::npy_save(op_FnamePfx_2 + "prob_params.npy", &prob_params[0], {prob_params_size,1},"w");
std::cout << "saved files \n" ;
}
cnpy::NpyArray read_velocity_field_data( std::string file_path_name, int* n_elements){
// reads numpy file from input and
// returns the cnpy::NpyArray structure and also fills in num_elements via the passed pointer n_elements
// extraction in main: float* vel_data = arr.data<float>();
// TODO: make it general. currently hard-coded for float arrays.
//print filename
std::cout << "file path and name: " << file_path_name << std::endl;
cnpy::NpyArray arr = cnpy::npy_load(file_path_name);
//prints for checks
int dim = arr.shape.size();
int num_elements = 1;
std::cout << "shape: " ;
for (int i = 0; i < dim; i++){
std::cout << arr.shape[i] << " , " ;
num_elements = num_elements*arr.shape[i];
}
*n_elements = num_elements;
std::cout << std::endl << "num_elements: " << num_elements << std::endl;
float* vel_data = arr.data<float>();
// print check first 10 elements
std::cout << "First 10 elements of loaded array are: " << std::endl;
for (int i = 0; i < 10; i++)
std::cout << vel_data[i] << " " ;
std::cout << std::endl << std::endl;
return arr;
}
/*
--- print_device_vector() ---
IMP: datatype has to be explicitly changed in that file
*/
// template<typename dType>
void print_device_vector( thrust::device_vector<long long int> &array, int start_id, int end_id, std::string array_name, std::string end, int method){
std::cout << array_name << " from id " << start_id << " to " << end_id << std::endl;
if (method == 1){
float temp = -10000000;
for(int i = start_id; i < end_id; i++){
if (array[i] != temp){
std::cout << i << "\n";
std::cout << array[i] << " " << end;
std::cout << "\n";
temp = array[i];
}
}
}
else if (method == 0){
for(int i = start_id; i < end_id; i++)
std::cout << array[i] << " " << end;
}
else
std::cout << "Invalid input for argument: method";
std::cout << std::endl;
}
void make_dir(std::string dir_name){
int mkdir_status;
std::string comm_mkdir = "mkdir ";
std::string str = comm_mkdir + dir_name;
const char * full_command = str.c_str();
mkdir_status = system(full_command);
std::cout << "mkdir_status = " << mkdir_status << std::endl;
}
void define_xs_or_ys(float* xs, float dx, float x0, int gsize){
for(int i = 0; i < gsize; i++)
xs[i] = x0 + i*dx;
}
void populate_ac_angles(float* ac_angles, int num_ac_angles){
//fills array with equally spaced angles in radians
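// e.g. num_ac_angles = 8 gives {0, pi/4, pi/2, ..., 7*pi/4}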
for (int i = 0; i < num_ac_angles; i++)
ac_angles[i] = i*(2*M_PI)/num_ac_angles;
return;
}
void populate_ac_speeds(float* ac_speeds, int num_ac_speeds, float Fmax){
//fills array with ac_speeds
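// e.g. num_ac_speeds = 4, Fmax = 2.0 gives {0.5, 1.0, 1.5, 2.0} (zero speed excluded)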
// std::cout << "infunc CHeck- num_ac_speeds = " << num_ac_speeds << "\n";
float delF = 0;
if (num_ac_speeds == 1)
ac_speeds[0] = Fmax;
else if (num_ac_speeds > 1){
// -----include 0 speed
// delF = Fmax/(num_ac_speeds-1);
// for(int i = 0; i<num_ac_speeds; i++)
// ac_speeds[i] = i*delF;
// ------exclude 0 speed
delF = Fmax/(num_ac_speeds);
for(int i = 0; i<num_ac_speeds; i++){
ac_speeds[i] = (i+1)*delF;
// std::cout << ac_speeds[i] << "\n";
}
}
else
std::cout << "Invalid num_ac_speeds\n";
return;
}
void populate_actions(float **H_actions, int num_ac_speeds, int num_ac_angles, float Fmax){
// populates 2d vector with possible actions
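// H_actions has num_ac_speeds*num_ac_angles rows; row j + num_ac_angles*i holds {ac_speeds[i], ac_angles[j]}, i.e. angles vary fastest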
float* ac_angles = new float[num_ac_angles];
populate_ac_angles(ac_angles, num_ac_angles);
float* ac_speeds = new float[num_ac_speeds];
populate_ac_speeds(ac_speeds, num_ac_speeds, Fmax);
int idx;
for (int i=0; i<num_ac_speeds; i++){
for(int j=0; j<num_ac_angles; j++){
idx = j + num_ac_angles*i;
// std::cout << ac_speeds[i] << "\n";
H_actions[idx][0] = ac_speeds[i];
H_actions[idx][1] = ac_angles[j];
}
}
return;
} |
3f1d0d2c23a0ab544217a66953e059ead9bc0d26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
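// Warp-shuffle demo: shuffleUp makes each lane read the value held by the lane one below it
// (lane 0 keeps its own value), shuffleDown reads from the lane one above, and
// shuffleButterfly exchanges values between even/odd lane pairs via XOR with 1.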
__global__
void shuffleUp(float* out, float* in, int count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= count) return;
float local_value = in[idx];
local_value = __shfl_up_sync(0xffffffff, local_value, 1);
out[idx] = local_value;
}
__global__
void shuffleDown(float* out, float* in, int count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= count) return;
float local_value = in[idx];
local_value = __shfl_down_sync(0xffffffff, local_value, 1);
out[idx] = local_value;
}
__global__
void shuffleButterfly(float* out, float* in, int count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= count) return;
float local_value = in[idx];
local_value = __shfl_xor_sync(0xffffffff, local_value, 1);
out[idx] = local_value;
}
void printArray(const char* label, float* array, int count) {
printf("%-20s ", label);
for (int x = 0; x < count; x++) { printf("%3d", int(array[x])); }
printf("\n");
}
int main(void) {
printf("\n");
int count = 16;
dim3 block(8);
dim3 grid((count + block.x - 1) / block.x);
float* h_array = (float*)malloc(count*sizeof(float));
for (int x = 0; x < count; x++) { h_array[x] = x; }
printArray("array: ", h_array, count);
float* h_result_array = (float*)malloc(count*sizeof(float));
float *d_array, *d_result_array;
hipMalloc((float**)&d_array, count*sizeof(float));
hipMemcpy(d_array, h_array, count*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((float**)&d_result_array, count*sizeof(float));
hipLaunchKernelGGL(( shuffleUp), dim3(grid), dim3(block), 0, 0, d_result_array, d_array, count);
hipDeviceSynchronize();
hipMemcpy(h_result_array, d_result_array, count*sizeof(float), hipMemcpyDeviceToHost);
printArray("shuffleUp: ", h_result_array, count);
hipLaunchKernelGGL(( shuffleDown), dim3(grid), dim3(block), 0, 0, d_result_array, d_array, count);
hipDeviceSynchronize();
hipMemcpy(h_result_array, d_result_array, count*sizeof(float), hipMemcpyDeviceToHost);
printArray("shuffleDown: ", h_result_array, count);
hipLaunchKernelGGL(( shuffleButterfly), dim3(grid), dim3(block), 0, 0, d_result_array, d_array, count);
hipDeviceSynchronize();
hipMemcpy(h_result_array, d_result_array, count*sizeof(float), hipMemcpyDeviceToHost);
printArray("shuffleButterfly: ", h_result_array, count);
free(h_array);
free(h_result_array);
hipFree(d_array);
hipFree(d_result_array);
hipDeviceReset();
printf("\n");
return 0;
} | 3f1d0d2c23a0ab544217a66953e059ead9bc0d26.cu | #include <stdio.h>
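// Warp-shuffle demo: shuffleUp makes each lane read the value held by the lane one below it
// (lane 0 keeps its own value), shuffleDown reads from the lane one above, and
// shuffleButterfly exchanges values between even/odd lane pairs via XOR with 1.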
__global__
void shuffleUp(float* out, float* in, int count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= count) return;
float local_value = in[idx];
local_value = __shfl_up_sync(0xffffffff, local_value, 1);
out[idx] = local_value;
}
__global__
void shuffleDown(float* out, float* in, int count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= count) return;
float local_value = in[idx];
local_value = __shfl_down_sync(0xffffffff, local_value, 1);
out[idx] = local_value;
}
__global__
void shuffleButterfly(float* out, float* in, int count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= count) return;
float local_value = in[idx];
local_value = __shfl_xor_sync(0xffffffff, local_value, 1);
out[idx] = local_value;
}
void printArray(const char* label, float* array, int count) {
printf("%-20s ", label);
for (int x = 0; x < count; x++) { printf("%3d", int(array[x])); }
printf("\n");
}
int main(void) {
printf("\n");
int count = 16;
dim3 block(8);
dim3 grid((count + block.x - 1) / block.x);
float* h_array = (float*)malloc(count*sizeof(float));
for (int x = 0; x < count; x++) { h_array[x] = x; }
printArray("array: ", h_array, count);
float* h_result_array = (float*)malloc(count*sizeof(float));
float *d_array, *d_result_array;
cudaMalloc((float**)&d_array, count*sizeof(float));
cudaMemcpy(d_array, h_array, count*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((float**)&d_result_array, count*sizeof(float));
shuffleUp<<<grid, block>>>(d_result_array, d_array, count);
cudaDeviceSynchronize();
cudaMemcpy(h_result_array, d_result_array, count*sizeof(float), cudaMemcpyDeviceToHost);
printArray("shuffleUp: ", h_result_array, count);
shuffleDown<<<grid, block>>>(d_result_array, d_array, count);
cudaDeviceSynchronize();
cudaMemcpy(h_result_array, d_result_array, count*sizeof(float), cudaMemcpyDeviceToHost);
printArray("shuffleDown: ", h_result_array, count);
shuffleButterfly<<<grid, block>>>(d_result_array, d_array, count);
cudaDeviceSynchronize();
cudaMemcpy(h_result_array, d_result_array, count*sizeof(float), cudaMemcpyDeviceToHost);
printArray("shuffleButterfly: ", h_result_array, count);
free(h_array);
free(h_result_array);
cudaFree(d_array);
cudaFree(d_result_array);
cudaDeviceReset();
printf("\n");
return 0;
} |
268fad490669a426e3d343160b9b09996ae0e73f.hip | // !!! This is a file automatically generated by hipify!!!
/**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>
#include <EvoNet/core/Preprocessing.h>
#include <EvoNet/ml/IntegrationFunctionTensor.h>
using namespace EvoNet;
using namespace std;
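// Each test fills small host-side Eigen tensors, applies the corresponding *TensorOp with a GPU stream device,
// and asserts the sink tensor against hand-computed values: sink time step 1 receives the result while
// time step 0 keeps its initial value.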
void test_operationfunctionSumTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
SumTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
//sink_input.setZero(); // Pre initNode update
sink_input.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
//expected.setValues({ {{0}, {1}}, {{0}, {4}}, {{0}, {9}}, {{0}, {16}} }); // Pre initNode update
expected.setValues({ {{1}, {1}}, {{1}, {4}}, {{1}, {9}}, {{1}, {16}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdTensorOp2()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 2;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setZero();
weights(0, 0) = 2; weights(1, 1) = 2;
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
//sink_input.setZero(); // Pre initNode update
sink_input.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{1, 1}, {2, 2}}, {{1, 1}, {4, 4}}, {{1, 1}, {6, 6}}, {{1, 1}, {8,8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdSCTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 2;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setZero();
weights(0, 0) = 2; weights(1, 1) = 2;
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
//sink_input.setZero(); // Pre initNode update
sink_input.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdSCTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{1, 1}, {2, 2}}, {{1, 1}, {4, 4}}, {{1, 1}, {6, 6}}, {{1, 1}, {8,8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMaxTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MaxTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {3}}, {{0}, {4}}, {{0}, {5}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMinTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(2);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MinTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{2}, {1}}, {{2}, {2}}, {{2}, {2}}, {{2}, {2}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMeanTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MeanTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {1.5}}, {{0}, {2.5}}, {{0}, {3.5}}, {{0}, {4.5}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarModTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarModTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2.5}}, {{0}, {6.5}}, {{0}, {12.5}}, {{0}, {20.5}} });
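// expected values equal the mean of the squared weighted inputs, e.g. (1*1 + 2*2)/2 = 2.5 and (4*4 + 5*5)/2 = 20.5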
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {0.25}}, {{0}, {0.25}}, {{0}, {0.25}}, {{0}, {0.25}} });
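// each batch input pair {x, x+1} has population variance ((0.5)^2 + (0.5)^2)/2 = 0.25, independent of x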
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionCountTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
CountTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {2}}, {{0}, {2}}, {{0}, {2}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionSumErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
SumErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {8}}, {{0}, {12}}, {{0}, {16}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {16}}, {{0}, {36}}, {{0}, {64}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMaxErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setValues({ {{0}, {2}},
{{0}, {3}},
{{0}, {4}},
{{0}, {5}} });
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MaxErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {6}}, {{0}, {8}}, {{0}, {10}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMinErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setValues({ {{0}, {1}},
{{0}, {2}},
{{0}, {3}},
{{0}, {4}} });
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MinErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMeanErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MeanErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes!
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {1}}, {{0}, {2}}, {{0}, {3}}, {{0}, {4}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarModErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarModErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes!
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
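// Expected values are consistent with 2 / n (n = 4, test-only) times the summed weighted source error, times the sink derivative (2), e.g. (1 + 1) * 2 / 4 * 2 = 2 for the first batch.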
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {8}}, {{0}, {12}}, {{0}, {16}} });
// TODO: update
//for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
// for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
// for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
// //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << layer_iter << " = " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
// assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
// }
// }
//}
}
void test_operationfunctionCountErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
CountErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
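// Count integration propagates no error, so the sink error is expected to stay at zero.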
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {0}}, {{0}, {0}}, {{0}, {0}}, {{0}, {0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionSumWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {1}},
//{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 0}, {1, 0}},
//{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
SumWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
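// Expected gradients are consistent with the negative batch-mean of sum_over_memory(sink_error * source_output), e.g. -(2 + 6 + 4 + 8) / 4 = -5 for the first source node and -(2 + 6 + 0 + 8) / 4 = -4 for the second.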
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-5}, {-4} });
//expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionProdWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
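// Expected gradients are consistent with the negative batch-mean of sum_over_memory(sink_error * source_input / weight), e.g. -(2 + 8 + 6 + 16) / 4 = -8 with unit weights.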
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-8}, {-8} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionMaxWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MaxWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
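// Expected gradients are consistent with the negative batch-mean of sum_over_memory(sink_error * source_output): -(2 + 6 + 3 + 8) / 4 = -4.75.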
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionMinWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MinWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionMeanWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MeanWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
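// Expected gradients appear to be the Sum case additionally scaled by 1 / n with n = source_layer_size = 2: -4.75 / 2 = -2.375.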
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-2.375}, {-2.375} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionVarModWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarModWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionVarWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
// TODO: update
//for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
// for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
// //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
// assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
// }
//}
}
void test_operationfunctionCountWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
hipStream_t stream; assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
CountWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
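// Count integration has no weight dependence, so the expected weight gradient is zero.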
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {0}, {0} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
int main(int argc, char** argv)
{
test_operationfunctionSumTensorOp();
test_operationfunctionProdTensorOp();
test_operationfunctionProdTensorOp2();
test_operationfunctionProdSCTensorOp();
test_operationfunctionMaxTensorOp();
test_operationfunctionMinTensorOp();
test_operationfunctionMeanTensorOp();
test_operationfunctionVarModTensorOp();
test_operationfunctionVarTensorOp();
test_operationfunctionCountTensorOp();
test_operationfunctionSumErrorTensorOp();
test_operationfunctionProdErrorTensorOp();
test_operationfunctionMaxErrorTensorOp();
test_operationfunctionMinErrorTensorOp();
test_operationfunctionMeanErrorTensorOp();
test_operationfunctionVarModErrorTensorOp();
test_operationfunctionVarErrorTensorOp();
test_operationfunctionCountErrorTensorOp();
test_operationfunctionSumWeightGradTensorOp();
test_operationfunctionProdWeightGradTensorOp();
test_operationfunctionMaxWeightGradTensorOp();
test_operationfunctionMinWeightGradTensorOp();
test_operationfunctionMeanWeightGradTensorOp();
test_operationfunctionVarModWeightGradTensorOp();
test_operationfunctionVarWeightGradTensorOp();
test_operationfunctionCountWeightGradTensorOp();
return 0;
}
#endif | 268fad490669a426e3d343160b9b09996ae0e73f.cu | /**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <cuda.h>
#include <cuda_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>
#include <EvoNet/core/Preprocessing.h>
#include <EvoNet/ml/IntegrationFunctionTensor.h>
using namespace EvoNet;
using namespace std;
void test_operationfunctionSumTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
SumTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
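// Expected: the weighted sum of the two source outputs at source_time_step = 0 (e.g. 1 + 1 = 2), written into the sink at sink_time_step = 1; time step 0 keeps its initial 0.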
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
//sink_input.setZero(); // Pre initNode update
sink_input.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
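// Expected: the product of the weighted source outputs (e.g. 2 * 2 = 4) at sink_time_step = 1; time step 0 keeps the initial value of 1 set above.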
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
//expected.setValues({ {{0}, {1}}, {{0}, {4}}, {{0}, {9}}, {{0}, {16}} }); // Pre initNode update
expected.setValues({ {{1}, {1}}, {{1}, {4}}, {{1}, {9}}, {{1}, {16}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdTensorOp2()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 2;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setZero();
weights(0, 0) = 2; weights(1, 1) = 2;
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
//sink_input.setZero(); // Pre initNode update
sink_input.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{1, 1}, {2, 2}}, {{1, 1}, {4, 4}}, {{1, 1}, {6, 6}}, {{1, 1}, {8,8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdSCTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 2;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setZero();
weights(0, 0) = 2; weights(1, 1) = 2;
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
//sink_input.setZero(); // Pre initNode update
sink_input.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdSCTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{1, 1}, {2, 2}}, {{1, 1}, {4, 4}}, {{1, 1}, {6, 6}}, {{1, 1}, {8,8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMaxTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MaxTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
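// Expected: the maximum of the weighted source outputs and the sink's initial 0, e.g. max(0, 1, 2) = 2 for the first batch, at sink_time_step = 1.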
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {3}}, {{0}, {4}}, {{0}, {5}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMinTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(2);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MinTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{2}, {1}}, {{2}, {2}}, {{2}, {2}}, {{2}, {2}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMeanTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MeanTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
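// Expected: the mean of the two weighted source outputs, e.g. (1 + 2) / 2 = 1.5 for the first batch, at sink_time_step = 1.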
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {1.5}}, {{0}, {2.5}}, {{0}, {3.5}}, {{0}, {4.5}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarModTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarModTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2.5}}, {{0}, {6.5}}, {{0}, {12.5}}, {{0}, {20.5}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
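// Expected: the population variance of the two weighted source outputs; each batch holds values {k, k + 1}, so the variance is 0.25 throughout.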
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {0.25}}, {{0}, {0.25}}, {{0}, {0.25}}, {{0}, {0.25}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionCountTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_input(batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
CountTensorOp<double, Eigen::GpuDevice> operation;
operation(source_output.data(), weights.data(), sink_input.data(), batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {2}}, {{0}, {2}}, {{0}, {2}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_input(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_input(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionSumErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
SumErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
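// Expected: the summed weighted source error times the sink derivative (2), e.g. (1 + 1) * 2 = 4 for the first batch, at sink_time_step = 1.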
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {8}}, {{0}, {12}}, {{0}, {16}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionProdErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
ProdErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
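// Expected values are consistent with sum(source_error * source_input / sink_output) * sink_derivative, e.g. (1 * 1 + 1 * 1) / 1 * 2 = 4 for the first batch.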
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {16}}, {{0}, {36}}, {{0}, {64}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMaxErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setValues({ {{0}, {2}},
{{0}, {3}},
{{0}, {4}},
{{0}, {5}} });
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MaxErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
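// Expected values are consistent with passing error only through the source whose input equals the sink's max output, e.g. source value 2 matches sink output 2, so 2 * 2 = 4 for the first batch.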
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {6}}, {{0}, {8}}, {{0}, {10}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMinErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 2}, {0, 0}},
{{2, 3}, {0, 0}},
{{3, 4}, {0, 0}},
{{4, 5}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setValues({ {{0}, {1}},
{{0}, {2}},
{{0}, {3}},
{{0}, {4}} });
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MinErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionMeanErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
MeanErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes!
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {1}}, {{0}, {2}}, {{0}, {3}}, {{0}, {4}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarModErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarModErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), 4, //NOTE: used only for testing purposes!
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {2}}, {{0}, {4}}, {{0}, {6}}, {{0}, {8}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << memory_iter << "= " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionVarErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
VarErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {4}}, {{0}, {8}}, {{0}, {12}}, {{0}, {16}} });
// TODO: update
//for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
// for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
// for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
// //std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << layer_iter << " = " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
// assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
// }
// }
//}
}
void test_operationfunctionCountErrorTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> source_error(batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 3> sink_derivative(batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::Tensor<double, 3> sink_output(batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
cudaStream_t stream; assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
CountErrorTensorOp<double, Eigen::GpuDevice> operation;
operation(source_error.data(), source_input.data(), weights.data(), sink_output.data(), sink_error.data(), sink_derivative.data(), sink_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, source_time_step, sink_time_step, device);
Eigen::Tensor<double, 3> expected(batch_size, memory_size, sink_layer_size);
expected.setValues({ {{0}, {0}}, {{0}, {0}}, {{0}, {0}}, {{0}, {0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int layer_iter = 0; layer_iter < sink_layer_size; ++layer_iter) {
	//std::cout << "Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Layer Iter: " << layer_iter << " = " << sink_error(batch_iter, memory_iter, layer_iter) << std::endl;
assert(assert_close(sink_error(batch_iter, memory_iter, layer_iter), expected(batch_iter, memory_iter, layer_iter)));
}
}
}
}
void test_operationfunctionSumWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {1}},
//{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 0}, {1, 0}},
//{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
SumWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-5}, {-4} });
//expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionProdWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
ProdWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-8}, {-8} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionMaxWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
MaxWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionMinWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
MinWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionMeanWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
MeanWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-2.375}, {-2.375} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionVarModWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
VarModWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
void test_operationfunctionVarWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
VarWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {-4.75}, {-4.75} });
// TODO: update
//for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
// for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
// //std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
// assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
// }
//}
}
void test_operationfunctionCountWeightGradTensorOp()
{
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 1;
Eigen::Tensor<double, 3> sink_error(batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::Tensor<double, 3> source_output(batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::Tensor<double, 3> source_input(batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::Tensor<double, 2> weights(source_layer_size, sink_layer_size);
weights.setConstant(1);
Eigen::Tensor<double, 2> weight_error(source_layer_size, sink_layer_size);
weight_error.setConstant(0);
	cudaStream_t stream;
	assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
	Eigen::GpuStreamDevice stream_device(&stream, 0);
	Eigen::GpuDevice device(&stream_device);
CountWeightGradTensorOp<double, Eigen::GpuDevice> operation;
operation(sink_error.data(), source_output.data(), weights.data(), source_input.data(), weight_error.data(), source_layer_size,
batch_size, memory_size, source_layer_size, sink_layer_size, device);
Eigen::Tensor<double, 2> expected(source_layer_size, sink_layer_size);
expected.setValues({ {0}, {0} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(assert_close(weight_error(source_iter, sink_iter), expected(source_iter, sink_iter)));
}
}
}
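// Entry point: runs the forward, error, and weight-gradient tensor-operation
// tests in sequence.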
int main(int argc, char** argv)
{
test_operationfunctionSumTensorOp();
test_operationfunctionProdTensorOp();
test_operationfunctionProdTensorOp2();
test_operationfunctionProdSCTensorOp();
test_operationfunctionMaxTensorOp();
test_operationfunctionMinTensorOp();
test_operationfunctionMeanTensorOp();
test_operationfunctionVarModTensorOp();
test_operationfunctionVarTensorOp();
test_operationfunctionCountTensorOp();
test_operationfunctionSumErrorTensorOp();
test_operationfunctionProdErrorTensorOp();
test_operationfunctionMaxErrorTensorOp();
test_operationfunctionMinErrorTensorOp();
test_operationfunctionMeanErrorTensorOp();
test_operationfunctionVarModErrorTensorOp();
test_operationfunctionVarErrorTensorOp();
test_operationfunctionCountErrorTensorOp();
test_operationfunctionSumWeightGradTensorOp();
test_operationfunctionProdWeightGradTensorOp();
test_operationfunctionMaxWeightGradTensorOp();
test_operationfunctionMinWeightGradTensorOp();
test_operationfunctionMeanWeightGradTensorOp();
test_operationfunctionVarModWeightGradTensorOp();
test_operationfunctionVarWeightGradTensorOp();
test_operationfunctionCountWeightGradTensorOp();
return 0;
}
#endif |
1b5266b5246e522f85601be546a1910368d57645.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
template <>
void gemm<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const float16* B, const float16 beta,
float16* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
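  // Swapping A and B and exchanging M and N makes the column-major BLAS call
  // produce C^T, whose memory layout coincides with the row-major C that this
  // CBLAS-style interface promises.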
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const half h_alpha = static_cast<const half>(alpha);
const half h_beta = static_cast<const half>(beta);
const half* h_A = reinterpret_cast<const half*>(A);
const half* h_B = reinterpret_cast<const half*>(B);
half* h_C = reinterpret_cast<half*>(C);
// TODO(kexinzhao): add processing code for compute capability < 53 case
PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53,
"cublas Hgemm requires GPU compute capability >= 53");
PADDLE_ENFORCE(platform::dynload::hipblasHgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb,
h_A, lda, &h_beta, h_C, N));
}
template <>
void gemm<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasSgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, N));
}
template <>
void gemm<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasDgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, N));
}
template <>
void gemm<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context, const bool transA,
const bool transB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const int lda, const float16* B,
const int ldb, const float16 beta, float16* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA = transA == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const half h_alpha = static_cast<const half>(alpha);
const half h_beta = static_cast<const half>(beta);
const half* h_A = reinterpret_cast<const half*>(A);
const half* h_B = reinterpret_cast<const half*>(B);
half* h_C = reinterpret_cast<half*>(C);
// TODO(kexinzhao): add processing code for compute capability < 53 case
PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53,
"cublas Hgemm requires GPU compute capability >= 53");
PADDLE_ENFORCE(platform::dynload::hipblasHgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb,
h_A, lda, &h_beta, h_C, ldc));
}
template <>
void gemm<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const bool transA,
const bool transB, const int M, const int N, const int K, const float alpha,
const float* A, const int lda, const float* B, const int ldb,
const float beta, float* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA = transA == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasSgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, ldc));
}
template <>
void gemm<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const bool transA,
const bool transB, const int M, const int N, const int K,
const double alpha, const double* A, const int lda, const double* B,
const int ldb, const double beta, double* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA = transA == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasDgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, ldc));
}
template <>
void matmul<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context,
const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, float16 alpha,
framework::Tensor* matrix_out, float16 beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in CUDAPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CUDADeviceContext, float16>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float16>(),
matrix_b.data<float16>(), beta, matrix_out->data<float16>());
}
template <>
void matmul<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context,
const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in CUDAPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CUDADeviceContext, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context,
const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in CUDAPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CUDADeviceContext, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
template <>
void batched_gemm<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const float16* B, const float16 beta,
float16* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const int strideC = M * N;
const half h_alpha = static_cast<const half>(alpha);
const half h_beta = static_cast<const half>(beta);
const half* h_A = reinterpret_cast<const half*>(A);
const half* h_B = reinterpret_cast<const half*>(B);
half* h_C = reinterpret_cast<half*>(C);
// TODO(kexinzhao): add processing code for compute capability < 53 case
PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53,
"cublas Hgemm requires GPU compute capability >= 53");
PADDLE_ENFORCE(platform::dynload::hipblasHgemmStridedBatched(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb,
strideB, h_A, lda, strideA, &h_beta, h_C, ldc, strideC, batchCount));
}
template <>
void batched_gemm<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::hipblasSgemmStridedBatched(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb,
strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount));
}
template <>
void batched_gemm<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::hipblasDgemmStridedBatched(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb,
strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount));
}
template <>
void gemv<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const bool trans_a, const int M,
const int N, const float alpha, const float* A, const float* B,
const float beta, float* C) {
hipblasOperation_t cuTransA = (trans_a == false) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::hipblasSgemv(context.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B,
1, &beta, C, 1));
}
template <>
void gemv<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const bool trans_a, const int M,
const int N, const double alpha, const double* A, const double* B,
const double beta, double* C) {
hipblasOperation_t cuTransA = (trans_a == false) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::hipblasDgemv(context.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B,
1, &beta, C, 1));
}
template <>
void axpy<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const int n, const float alpha,
const float* x, float* y) {
PADDLE_ENFORCE(platform::dynload::hipblasSaxpy(context.cublas_handle(), n,
&alpha, x, 1, y, 1));
}
template <>
void axpy<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const int n, const double alpha,
const double* x, double* y) {
PADDLE_ENFORCE(platform::dynload::hipblasDaxpy(context.cublas_handle(), n,
&alpha, x, 1, y, 1));
}
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
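// Type-dispatch functor: framework::VisitDataType instantiates operator()<T>
// for the tensor's runtime dtype and forwards to SetConstant on the CUDA
// device context.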
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void operator()() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(framework::ToDataType(tensor->type()),
TensorSetConstantGPU(context, tensor, value));
}
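// Grid-stride kernel that broadcasts the vector b across every row of a:
// c[i] = a[i] + b[w], where w is i modulo the row width. The row index is
// derived by multiplying by the precomputed reciprocal 1.0 / width instead
// of performing an integer division per element.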
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector.numel(), size);
PADDLE_ENFORCE_EQ(output->dims(), in_dims);
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
hipLaunchKernelGGL(( RowwiseAddKernel<T>), dim3(grids), dim3(blocks), 0, context.stream(),
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
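// The column sums are computed with a single GEMV, vector = input^T * ones,
// using a temporary device vector of ones of length in_dims[0].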
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size);
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
gemv<platform::CUDADeviceContext, double>(
context, true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]),
1.0, input.data<double>(), one.data<double>(), 0.0,
vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]);
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
gemv<platform::CUDADeviceContext, double>(
context, true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]),
1.0, one.data<double>(), input.data<double>(), 0.0,
vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 1b5266b5246e522f85601be546a1910368d57645.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
template <>
void gemm<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const float16* B, const float16 beta,
float16* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
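  // Swapping A and B and exchanging M and N makes the column-major BLAS call
  // produce C^T, whose memory layout coincides with the row-major C that this
  // CBLAS-style interface promises.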
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const half h_alpha = static_cast<const half>(alpha);
const half h_beta = static_cast<const half>(beta);
const half* h_A = reinterpret_cast<const half*>(A);
const half* h_B = reinterpret_cast<const half*>(B);
half* h_C = reinterpret_cast<half*>(C);
// TODO(kexinzhao): add processing code for compute capability < 53 case
PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53,
"cublas Hgemm requires GPU compute capability >= 53");
PADDLE_ENFORCE(platform::dynload::cublasHgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb,
h_A, lda, &h_beta, h_C, N));
}
template <>
void gemm<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, N));
}
template <>
void gemm<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, N));
}
template <>
void gemm<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context, const bool transA,
const bool transB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const int lda, const float16* B,
const int ldb, const float16 beta, float16* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T;
const half h_alpha = static_cast<const half>(alpha);
const half h_beta = static_cast<const half>(beta);
const half* h_A = reinterpret_cast<const half*>(A);
const half* h_B = reinterpret_cast<const half*>(B);
half* h_C = reinterpret_cast<half*>(C);
// TODO(kexinzhao): add processing code for compute capability < 53 case
PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53,
"cublas Hgemm requires GPU compute capability >= 53");
PADDLE_ENFORCE(platform::dynload::cublasHgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb,
h_A, lda, &h_beta, h_C, ldc));
}
template <>
void gemm<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const bool transA,
const bool transB, const int M, const int N, const int K, const float alpha,
const float* A, const int lda, const float* B, const int ldb,
const float beta, float* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, ldc));
}
template <>
void gemm<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const bool transA,
const bool transB, const int M, const int N, const int K,
const double alpha, const double* A, const int lda, const double* B,
const int ldb, const double beta, double* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A,
lda, &beta, C, ldc));
}
template <>
void matmul<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context,
const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, float16 alpha,
framework::Tensor* matrix_out, float16 beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in CUDAPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CUDADeviceContext, float16>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float16>(),
matrix_b.data<float16>(), beta, matrix_out->data<float16>());
}
template <>
void matmul<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context,
const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in CUDAPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CUDADeviceContext, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context,
const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in CUDAPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CUDADeviceContext, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
template <>
void batched_gemm<platform::CUDADeviceContext, float16>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const float16* B, const float16 beta,
float16* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const int strideC = M * N;
const half h_alpha = static_cast<const half>(alpha);
const half h_beta = static_cast<const half>(beta);
const half* h_A = reinterpret_cast<const half*>(A);
const half* h_B = reinterpret_cast<const half*>(B);
half* h_C = reinterpret_cast<half*>(C);
// TODO(kexinzhao): add processing code for compute capability < 53 case
PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53,
"cublas Hgemm requires GPU compute capability >= 53");
PADDLE_ENFORCE(platform::dynload::cublasHgemmStridedBatched(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb,
strideB, h_A, lda, strideA, &h_beta, h_C, ldc, strideC, batchCount));
}
template <>
void batched_gemm<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb,
strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount));
}
template <>
void batched_gemm<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C, const int batchCount, const int strideA, const int strideB) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const int strideC = M * N;
PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched(
context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb,
strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount));
}
template <>
void gemv<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const bool trans_a, const int M,
const int N, const float alpha, const float* A, const float* B,
const float beta, float* C) {
cublasOperation_t cuTransA = (trans_a == false) ? CUBLAS_OP_T : CUBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::cublasSgemv(context.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B,
1, &beta, C, 1));
}
template <>
void gemv<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const bool trans_a, const int M,
const int N, const double alpha, const double* A, const double* B,
const double beta, double* C) {
cublasOperation_t cuTransA = (trans_a == false) ? CUBLAS_OP_T : CUBLAS_OP_N;
PADDLE_ENFORCE(platform::dynload::cublasDgemv(context.cublas_handle(),
cuTransA, N, M, &alpha, A, N, B,
1, &beta, C, 1));
}
template <>
void axpy<platform::CUDADeviceContext, float>(
const platform::CUDADeviceContext& context, const int n, const float alpha,
const float* x, float* y) {
PADDLE_ENFORCE(platform::dynload::cublasSaxpy(context.cublas_handle(), n,
&alpha, x, 1, y, 1));
}
template <>
void axpy<platform::CUDADeviceContext, double>(
const platform::CUDADeviceContext& context, const int n, const double alpha,
const double* x, double* y) {
PADDLE_ENFORCE(platform::dynload::cublasDaxpy(context.cublas_handle(), n,
&alpha, x, 1, y, 1));
}
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void operator()() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(framework::ToDataType(tensor->type()),
TensorSetConstantGPU(context, tensor, value));
}
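// Grid-stride kernel that broadcasts the vector b across every row of a:
// c[i] = a[i] + b[w], where w is i modulo the row width. The row index is
// derived by multiplying by the precomputed reciprocal 1.0 / width instead
// of performing an integer division per element.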
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector.numel(), size);
PADDLE_ENFORCE_EQ(output->dims(), in_dims);
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
RowwiseAddKernel<T><<<grids, blocks, 0, context.stream()>>>(
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size);
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
gemv<platform::CUDADeviceContext, double>(
context, true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]),
1.0, input.data<double>(), one.data<double>(), 0.0,
vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]);
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
gemv<platform::CUDADeviceContext, double>(
context, true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]),
1.0, one.data<double>(), input.data<double>(), 0.0,
vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
45c51d27d25224928be1bb94ac9b7e72ab72f01e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zcgs into one.
/* -------------------------------------------------------------------------- */
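// Fused CGS update, one thread per row over num_cols columns:
// u = r + beta*q and p = u + beta*(q + beta*p).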
__global__ void
magma_zcgs_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex *r,
magmaDoubleComplex *q,
magmaDoubleComplex *u,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp;
tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ];
p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ]
+ beta * beta * p[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
u = r + beta q
p = u + beta*(q + beta*p)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
q magmaDoubleComplex_ptr
vector
@param[in,out]
u magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
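/*
    Illustrative call sketch, assuming r, q, u, p are device vectors of
    length num_rows*num_cols and queue is a valid magma_queue_t:
        magma_zcgs_1( num_rows, num_cols, beta, r, q, u, p, queue );
    fuses the two vector updates above into a single kernel launch.
*/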
extern "C"
magma_int_t
magma_zcgs_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr q,
magmaDoubleComplex_ptr u,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zcgs_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, r, q, u, p );
return MAGMA_SUCCESS;
}
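// A minimal usage sketch (the helper name, vector names and the single-column
// call below are assumptions for illustration only, not taken from the MAGMA
// solver sources): one CGS iteration can update u and p with a single fused
// call instead of several separate vector sweeps.
inline magma_int_t
example_zcgs_u_p_update(
    magma_int_t n,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr r,
    magmaDoubleComplex_ptr q,
    magmaDoubleComplex_ptr u,
    magmaDoubleComplex_ptr p,
    magma_queue_t queue )
{
    // computes u = r + beta*q and p = u + beta*(q + beta*p) in one launch
    return magma_zcgs_1( n, 1, beta, r, q, u, p, queue );
}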
__global__ void
magma_zcgs_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex *r,
magmaDoubleComplex *u,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp;
tmp = r[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
p[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
u = r
p = r
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in,out]
u magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zcgs_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr u,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zcgs_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, r, u, p);
return MAGMA_SUCCESS;
}
__global__ void
magma_zcgs_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex *v_hat,
magmaDoubleComplex *u,
magmaDoubleComplex *q,
magmaDoubleComplex *t )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex uloc, tmp;
uloc = u[ i+j*num_rows ];
tmp = uloc - alpha * v_hat[ i+j*num_rows ];
t[ i+j*num_rows ] = tmp + uloc;
q[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
q = u - alpha v_hat
t = u + q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
v_hat magmaDoubleComplex_ptr
vector
@param[in]
u magmaDoubleComplex_ptr
vector
@param[in,out]
q magmaDoubleComplex_ptr
vector
@param[in,out]
t magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zcgs_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr v_hat,
magmaDoubleComplex_ptr u,
magmaDoubleComplex_ptr q,
magmaDoubleComplex_ptr t,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zcgs_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, v_hat, u, q, t );
return MAGMA_SUCCESS;
}
__global__ void
magma_zcgs_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex *u_hat,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * u_hat[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ]
- alpha * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x = x + alpha u_hat
r = r -alpha*A u_hat = r -alpha*t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
u_hat magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zcgs_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr u_hat,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zcgs_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, u_hat, t, x, r );
return MAGMA_SUCCESS;
}
| 45c51d27d25224928be1bb94ac9b7e72ab72f01e.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zcgs into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zcgs_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex *r,
magmaDoubleComplex *q,
magmaDoubleComplex *u,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp;
tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ];
p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ]
+ beta * beta * p[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
u = r + beta q
p = u + beta*(q + beta*p)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
q magmaDoubleComplex_ptr
vector
@param[in,out]
u magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zcgs_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr q,
magmaDoubleComplex_ptr u,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zcgs_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, r, q, u, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_zcgs_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex *r,
magmaDoubleComplex *u,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp;
tmp = r[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
p[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
u = r
p = r
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in,out]
u magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zcgs_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr u,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zcgs_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, r, u, p);
return MAGMA_SUCCESS;
}
__global__ void
magma_zcgs_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex *v_hat,
magmaDoubleComplex *u,
magmaDoubleComplex *q,
magmaDoubleComplex *t )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex uloc, tmp;
uloc = u[ i+j*num_rows ];
tmp = uloc - alpha * v_hat[ i+j*num_rows ];
t[ i+j*num_rows ] = tmp + uloc;
q[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
q = u - alpha v_hat
t = u + q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
v_hat magmaDoubleComplex_ptr
vector
@param[in]
u magmaDoubleComplex_ptr
vector
@param[in,out]
q magmaDoubleComplex_ptr
vector
@param[in,out]
t magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zcgs_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr v_hat,
magmaDoubleComplex_ptr u,
magmaDoubleComplex_ptr q,
magmaDoubleComplex_ptr t,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zcgs_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, v_hat, u, q, t );
return MAGMA_SUCCESS;
}
__global__ void
magma_zcgs_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex *u_hat,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * u_hat[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ]
- alpha * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x = x + alpha u_hat
r = r -alpha*A u_hat = r -alpha*t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
u_hat magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zcgs_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr u_hat,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zcgs_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, u_hat, t, x, r );
return MAGMA_SUCCESS;
}
|
766f090f205ba1894dac8bc0f9308bb00fb1bd5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<iostream>
#include<stdlib.h>
#include<string.h>
#define NUM_THREADS 256
#define IMG_SIZE 1048576
struct Coefficients_AOS {
int* r;
int* b;
int* g;
int* hue;
int* saturation;
int* maxVal;
int* minVal;
int* finalVal;
};
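// Note: each field above is allocated as its own device array of IMG_SIZE
// ints (see complicatedCalculation() below), so consecutive threads read
// consecutive elements of every array and the accesses to r, g, b, hue,
// saturation, maxVal, minVal and finalVal are all coalesced.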
__global__
void complicatedCalculation(Coefficients_AOS data)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int grayscale = (data.r[i] + data.g[i] + data.b[i])/data.maxVal[i];
int hue_sat = data.hue[i] * data.saturation[i] / data.minVal[i];
data.finalVal[i] = grayscale*hue_sat;
}
void complicatedCalculation()
{
Coefficients_AOS d_x;
hipMalloc(&d_x.r, IMG_SIZE*sizeof(int));
hipMalloc(&d_x.g, IMG_SIZE*sizeof(int));
hipMalloc(&d_x.b, IMG_SIZE*sizeof(int));
hipMalloc(&d_x.hue, IMG_SIZE*sizeof(int));
hipMalloc(&d_x.saturation, IMG_SIZE*sizeof(int));
hipMalloc(&d_x.maxVal, IMG_SIZE*sizeof(int));
hipMalloc(&d_x.minVal, IMG_SIZE*sizeof(int));
hipMalloc(&d_x.finalVal, IMG_SIZE*sizeof(int));
int num_blocks = IMG_SIZE/NUM_THREADS;
hipLaunchKernelGGL(( complicatedCalculation), dim3(num_blocks),dim3(NUM_THREADS), 0, 0, d_x);
hipFree(d_x.r);
hipFree(d_x.g);
hipFree(d_x.b);
hipFree(d_x.hue);
hipFree(d_x.saturation);
	hipFree(d_x.maxVal);
hipFree(d_x.minVal);
hipFree(d_x.finalVal);
}
int main(int argc, char*argv[])
{
complicatedCalculation();
return 0;
}
| 766f090f205ba1894dac8bc0f9308bb00fb1bd5c.cu | #include<stdio.h>
#include<iostream>
#include<stdlib.h>
#include<string.h>
#define NUM_THREADS 256
#define IMG_SIZE 1048576
struct Coefficients_AOS {
int* r;
int* b;
int* g;
int* hue;
int* saturation;
int* maxVal;
int* minVal;
int* finalVal;
};
__global__
void complicatedCalculation(Coefficients_AOS data)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int grayscale = (data.r[i] + data.g[i] + data.b[i])/data.maxVal[i];
int hue_sat = data.hue[i] * data.saturation[i] / data.minVal[i];
data.finalVal[i] = grayscale*hue_sat;
}
void complicatedCalculation()
{
Coefficients_AOS d_x;
cudaMalloc(&d_x.r, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.g, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.b, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.hue, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.saturation, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.maxVal, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.minVal, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.finalVal, IMG_SIZE*sizeof(int));
int num_blocks = IMG_SIZE/NUM_THREADS;
complicatedCalculation<<<num_blocks,NUM_THREADS>>>(d_x);
cudaFree(d_x.r);
cudaFree(d_x.g);
cudaFree(d_x.b);
cudaFree(d_x.hue);
cudaFree(d_x.saturation);
	cudaFree(d_x.maxVal);
cudaFree(d_x.minVal);
cudaFree(d_x.finalVal);
}
int main(int argc, char*argv[])
{
complicatedCalculation();
return 0;
}
|
9b7a82d3f0371a8ccc3d5a18535b38a0971c77b4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "property_generator.cuh"
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/mg_utilities.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <prims/transform_reduce_v.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <cuco/hash_functions.cuh>
#include <raft/comms/mpi_comms.hpp>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tuple.h>
#include <gtest/gtest.h>
#include <random>
template <typename vertex_t, typename property_t>
struct v_op_t {
int32_t mod{};
__device__ auto operator()(vertex_t, vertex_t val) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
return cugraph::test::detail::make_property_value<property_t>(hash_func(val) % mod);
}
};
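// v_op_t hashes the original (pre-renumbering) vertex id, so every GPU in the
// multi-GPU run and the single-GPU reference below derive the same
// deterministic pseudo-random property for a given vertex; the modulo keeps
// the values in a small range so the plus/min/max reductions stay comparable.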
template <typename T>
struct result_compare {
static constexpr double threshold_ratio{1e-3};
constexpr auto operator()(const T& t1, const T& t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
};
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
private:
template <typename T>
constexpr bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MGTransformReduceV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MGTransformReduceV() {}
static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); }
static void TearDownTestCase() { handle_.reset(); }
virtual void SetUp() {}
virtual void TearDown() {}
  // Compare the results of the MG transform_reduce_v primitive against a single-GPU reference
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
HighResTimer hr_timer{};
// 1. create MG graph
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG Construct graph");
}
cugraph::graph_t<vertex_t, edge_t, store_transposed, true> mg_graph(*handle_);
std::optional<rmm::device_uvector<vertex_t>> mg_renumber_map{std::nullopt};
std::tie(mg_graph, std::ignore, mg_renumber_map) =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
*handle_, input_usecase, true, true);
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
auto mg_graph_view = mg_graph.view();
// 2. run MG transform reduce
const int hash_bin_count = 5;
const int initial_value = 10;
v_op_t<vertex_t, result_t> v_op{hash_bin_count};
auto property_initial_value =
cugraph::test::generate<vertex_t, result_t>::initial_value(initial_value);
enum class reduction_type_t { PLUS, MINIMUM, MAXIMUM };
std::array<reduction_type_t, 3> reduction_types = {
reduction_type_t::PLUS, reduction_type_t::MINIMUM, reduction_type_t::MAXIMUM};
std::unordered_map<reduction_type_t, result_t> results;
for (auto reduction_type : reduction_types) {
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG transform_reduce_v");
}
switch (reduction_type) {
case reduction_type_t::PLUS:
results[reduction_type] = transform_reduce_v(*handle_,
mg_graph_view,
(*mg_renumber_map).begin(),
v_op,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
results[reduction_type] = transform_reduce_v(*handle_,
mg_graph_view,
(*mg_renumber_map).begin(),
v_op,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
results[reduction_type] = transform_reduce_v(*handle_,
mg_graph_view,
(*mg_renumber_map).begin(),
v_op,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
}
// 3. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::graph_t<vertex_t, edge_t, store_transposed, false> sg_graph(*handle_);
std::tie(sg_graph, std::ignore, std::ignore) = cugraph::test::mg_graph_to_sg_graph(
*handle_,
mg_graph_view,
std::optional<cugraph::edge_property_view_t<edge_t, weight_t const*>>{std::nullopt},
std::make_optional<raft::device_span<vertex_t const>>((*mg_renumber_map).data(),
(*mg_renumber_map).size()),
false);
if (handle_->get_comms().get_rank() == 0) {
auto sg_graph_view = sg_graph.view();
for (auto reduction_type : reduction_types) {
result_t expected_result{};
switch (reduction_type) {
case reduction_type_t::PLUS:
expected_result = transform_reduce_v(
*handle_,
sg_graph_view,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
v_op,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
expected_result = transform_reduce_v(
*handle_,
sg_graph_view,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
v_op,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
expected_result = transform_reduce_v(
*handle_,
sg_graph_view,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
v_op,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
result_compare<result_t> compare{};
ASSERT_TRUE(compare(expected_result, results[reduction_type]));
}
}
}
}
private:
static std::unique_ptr<raft::handle_t> handle_;
};
template <typename input_usecase_t>
std::unique_ptr<raft::handle_t> Tests_MGTransformReduceV<input_usecase_t>::handle_ = nullptr;
using Tests_MGTransformReduceV_File = Tests_MGTransformReduceV<cugraph::test::File_Usecase>;
using Tests_MGTransformReduceV_Rmat = Tests_MGTransformReduceV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, false>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, thrust::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, thrust::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, true>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, thrust::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, thrust::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MGTransformReduceV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MGTransformReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MGTransformReduceV_Rmat,
::testing::Combine(
::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
| 9b7a82d3f0371a8ccc3d5a18535b38a0971c77b4.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "property_generator.cuh"
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/mg_utilities.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <prims/transform_reduce_v.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <cuco/hash_functions.cuh>
#include <raft/comms/mpi_comms.hpp>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tuple.h>
#include <gtest/gtest.h>
#include <random>
template <typename vertex_t, typename property_t>
struct v_op_t {
int32_t mod{};
__device__ auto operator()(vertex_t, vertex_t val) const
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
return cugraph::test::detail::make_property_value<property_t>(hash_func(val) % mod);
}
};
template <typename T>
struct result_compare {
static constexpr double threshold_ratio{1e-3};
constexpr auto operator()(const T& t1, const T& t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (std::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
};
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
private:
template <typename T>
constexpr bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (std::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MGTransformReduceV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MGTransformReduceV() {}
static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); }
static void TearDownTestCase() { handle_.reset(); }
virtual void SetUp() {}
virtual void TearDown() {}
  // Compare the results of the MG transform_reduce_v primitive against a single-GPU reference
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
HighResTimer hr_timer{};
// 1. create MG graph
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG Construct graph");
}
cugraph::graph_t<vertex_t, edge_t, store_transposed, true> mg_graph(*handle_);
std::optional<rmm::device_uvector<vertex_t>> mg_renumber_map{std::nullopt};
std::tie(mg_graph, std::ignore, mg_renumber_map) =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
*handle_, input_usecase, true, true);
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
auto mg_graph_view = mg_graph.view();
// 2. run MG transform reduce
const int hash_bin_count = 5;
const int initial_value = 10;
v_op_t<vertex_t, result_t> v_op{hash_bin_count};
auto property_initial_value =
cugraph::test::generate<vertex_t, result_t>::initial_value(initial_value);
enum class reduction_type_t { PLUS, MINIMUM, MAXIMUM };
std::array<reduction_type_t, 3> reduction_types = {
reduction_type_t::PLUS, reduction_type_t::MINIMUM, reduction_type_t::MAXIMUM};
std::unordered_map<reduction_type_t, result_t> results;
for (auto reduction_type : reduction_types) {
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG transform_reduce_v");
}
switch (reduction_type) {
case reduction_type_t::PLUS:
results[reduction_type] = transform_reduce_v(*handle_,
mg_graph_view,
(*mg_renumber_map).begin(),
v_op,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
results[reduction_type] = transform_reduce_v(*handle_,
mg_graph_view,
(*mg_renumber_map).begin(),
v_op,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
results[reduction_type] = transform_reduce_v(*handle_,
mg_graph_view,
(*mg_renumber_map).begin(),
v_op,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
}
// 3. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::graph_t<vertex_t, edge_t, store_transposed, false> sg_graph(*handle_);
std::tie(sg_graph, std::ignore, std::ignore) = cugraph::test::mg_graph_to_sg_graph(
*handle_,
mg_graph_view,
std::optional<cugraph::edge_property_view_t<edge_t, weight_t const*>>{std::nullopt},
std::make_optional<raft::device_span<vertex_t const>>((*mg_renumber_map).data(),
(*mg_renumber_map).size()),
false);
if (handle_->get_comms().get_rank() == 0) {
auto sg_graph_view = sg_graph.view();
for (auto reduction_type : reduction_types) {
result_t expected_result{};
switch (reduction_type) {
case reduction_type_t::PLUS:
expected_result = transform_reduce_v(
*handle_,
sg_graph_view,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
v_op,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
expected_result = transform_reduce_v(
*handle_,
sg_graph_view,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
v_op,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
expected_result = transform_reduce_v(
*handle_,
sg_graph_view,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
v_op,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
result_compare<result_t> compare{};
ASSERT_TRUE(compare(expected_result, results[reduction_type]));
}
}
}
}
private:
static std::unique_ptr<raft::handle_t> handle_;
};
template <typename input_usecase_t>
std::unique_ptr<raft::handle_t> Tests_MGTransformReduceV<input_usecase_t>::handle_ = nullptr;
using Tests_MGTransformReduceV_File = Tests_MGTransformReduceV<cugraph::test::File_Usecase>;
using Tests_MGTransformReduceV_Rmat = Tests_MGTransformReduceV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, false>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, thrust::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, thrust::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, true>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, thrust::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, thrust::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt32Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGTransformReduceV_Rmat, CheckInt64Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MGTransformReduceV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MGTransformReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MGTransformReduceV_Rmat,
::testing::Combine(
::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
|
d453d2554063d6b26b0058179ad9aa4238b0c649.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-20, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_impl.cuh"
#include <cudf/column/column.hpp>
#include <cudf/sorting.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/table/table_view.hpp>
namespace cudf {
namespace experimental {
namespace detail {
std::unique_ptr<column> stable_sorted_order(
table_view input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return sorted_order<true>(input, column_order, null_precedence, mr, stream);
}
} // namespace detail
std::unique_ptr<column> stable_sorted_order(
table_view input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
return detail::stable_sorted_order(input, column_order, null_precedence, mr);
}
} // namespace experimental
} // namespace cudf
| d453d2554063d6b26b0058179ad9aa4238b0c649.cu | /*
* Copyright (c) 2019-20, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_impl.cuh"
#include <cudf/column/column.hpp>
#include <cudf/sorting.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/table/table_view.hpp>
namespace cudf {
namespace experimental {
namespace detail {
std::unique_ptr<column> stable_sorted_order(
table_view input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return sorted_order<true>(input, column_order, null_precedence, mr, stream);
}
} // namespace detail
std::unique_ptr<column> stable_sorted_order(
table_view input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
return detail::stable_sorted_order(input, column_order, null_precedence, mr);
}
} // namespace experimental
} // namespace cudf
|
69fe7a5039ea1acc22f17eb34a26847e73cced97.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2019 XGBoost contributors
*
* \brief Utilities for CUDA.
*/
#ifdef XGBOOST_USE_NCCL
#include <rccl.h>
#endif // #ifdef XGBOOST_USE_NCCL
#include <sstream>
#include "device_helpers_hip.cuh"
namespace dh {
#if __CUDACC_VER_MAJOR__ > 9
constexpr std::size_t kUuidLength =
sizeof(std::declval<hipDeviceProp_t>().uuid) / sizeof(uint64_t);
void GetCudaUUID(int world_size, int rank, int device_ord,
xgboost::common::Span<uint64_t, kUuidLength> uuid) {
hipDeviceProp_t prob;
safe_cuda(hipGetDeviceProperties(&prob, device_ord));
std::memcpy(uuid.data(), static_cast<void*>(&(prob.uuid)), sizeof(prob.uuid));
}
std::string PrintUUID(xgboost::common::Span<uint64_t, kUuidLength> uuid) {
std::stringstream ss;
for (auto v : uuid) {
ss << std::hex << v;
}
return ss.str();
}
#endif // __CUDACC_VER_MAJOR__ > 9
void AllReducer::Init(int _device_ordinal) {
#ifdef XGBOOST_USE_NCCL
LOG(DEBUG) << "Running nccl init on: " << __CUDACC_VER_MAJOR__ << "." << __CUDACC_VER_MINOR__;
device_ordinal = _device_ordinal;
int32_t const rank = rabit::GetRank();
#if __CUDACC_VER_MAJOR__ > 9
int32_t const world = rabit::GetWorldSize();
std::vector<uint64_t> uuids(world * kUuidLength, 0);
auto s_uuid = xgboost::common::Span<uint64_t>{uuids.data(), uuids.size()};
auto s_this_uuid = s_uuid.subspan(rank * kUuidLength, kUuidLength);
GetCudaUUID(world, rank, device_ordinal, s_this_uuid);
// No allgather yet.
rabit::Allreduce<rabit::op::Sum, uint64_t>(uuids.data(), uuids.size());
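  // Every rank only writes its own kUuidLength-wide slot and leaves the rest
  // zero-initialised, so a sum allreduce reconstructs the concatenation of all
  // device UUIDs, emulating the missing allgather.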
  std::vector<xgboost::common::Span<uint64_t, kUuidLength>> converted(world);
size_t j = 0;
for (size_t i = 0; i < uuids.size(); i += kUuidLength) {
converted[j] =
xgboost::common::Span<uint64_t, kUuidLength>{uuids.data() + i, kUuidLength};
j++;
}
auto iter = std::unique(converted.begin(), converted.end());
auto n_uniques = std::distance(converted.begin(), iter);
CHECK_EQ(n_uniques, world)
<< "Multiple processes within communication group running on same CUDA "
<< "device is not supported";
#endif // __CUDACC_VER_MAJOR__ > 9
id = GetUniqueId();
dh::safe_cuda(hipSetDevice(device_ordinal));
dh::safe_nccl(ncclCommInitRank(&comm, rabit::GetWorldSize(), id, rank));
safe_cuda(hipStreamCreate(&stream));
initialised_ = true;
#endif // XGBOOST_USE_NCCL
}
AllReducer::~AllReducer() {
#ifdef XGBOOST_USE_NCCL
if (initialised_) {
dh::safe_cuda(hipStreamDestroy(stream));
ncclCommDestroy(comm);
}
if (xgboost::ConsoleLogger::ShouldLog(xgboost::ConsoleLogger::LV::kDebug)) {
LOG(CONSOLE) << "======== NCCL Statistics========";
LOG(CONSOLE) << "AllReduce calls: " << allreduce_calls_;
LOG(CONSOLE) << "AllReduce total MiB communicated: " << allreduce_bytes_/1048576;
}
#endif // XGBOOST_USE_NCCL
}
} // namespace dh
| 69fe7a5039ea1acc22f17eb34a26847e73cced97.cu | /*!
* Copyright 2017-2019 XGBoost contributors
*
* \brief Utilities for CUDA.
*/
#ifdef XGBOOST_USE_NCCL
#include <nccl.h>
#endif // #ifdef XGBOOST_USE_NCCL
#include <sstream>
#include "device_helpers.cuh"
namespace dh {
#if __CUDACC_VER_MAJOR__ > 9
constexpr std::size_t kUuidLength =
sizeof(std::declval<cudaDeviceProp>().uuid) / sizeof(uint64_t);
void GetCudaUUID(int world_size, int rank, int device_ord,
xgboost::common::Span<uint64_t, kUuidLength> uuid) {
cudaDeviceProp prob;
safe_cuda(cudaGetDeviceProperties(&prob, device_ord));
std::memcpy(uuid.data(), static_cast<void*>(&(prob.uuid)), sizeof(prob.uuid));
}
std::string PrintUUID(xgboost::common::Span<uint64_t, kUuidLength> uuid) {
std::stringstream ss;
for (auto v : uuid) {
ss << std::hex << v;
}
return ss.str();
}
#endif // __CUDACC_VER_MAJOR__ > 9
void AllReducer::Init(int _device_ordinal) {
#ifdef XGBOOST_USE_NCCL
LOG(DEBUG) << "Running nccl init on: " << __CUDACC_VER_MAJOR__ << "." << __CUDACC_VER_MINOR__;
device_ordinal = _device_ordinal;
int32_t const rank = rabit::GetRank();
#if __CUDACC_VER_MAJOR__ > 9
int32_t const world = rabit::GetWorldSize();
std::vector<uint64_t> uuids(world * kUuidLength, 0);
auto s_uuid = xgboost::common::Span<uint64_t>{uuids.data(), uuids.size()};
auto s_this_uuid = s_uuid.subspan(rank * kUuidLength, kUuidLength);
GetCudaUUID(world, rank, device_ordinal, s_this_uuid);
// No allgather yet.
rabit::Allreduce<rabit::op::Sum, uint64_t>(uuids.data(), uuids.size());
  std::vector<xgboost::common::Span<uint64_t, kUuidLength>> converted(world);
size_t j = 0;
for (size_t i = 0; i < uuids.size(); i += kUuidLength) {
converted[j] =
xgboost::common::Span<uint64_t, kUuidLength>{uuids.data() + i, kUuidLength};
j++;
}
auto iter = std::unique(converted.begin(), converted.end());
auto n_uniques = std::distance(converted.begin(), iter);
CHECK_EQ(n_uniques, world)
<< "Multiple processes within communication group running on same CUDA "
<< "device is not supported";
#endif // __CUDACC_VER_MAJOR__ > 9
id = GetUniqueId();
dh::safe_cuda(cudaSetDevice(device_ordinal));
dh::safe_nccl(ncclCommInitRank(&comm, rabit::GetWorldSize(), id, rank));
safe_cuda(cudaStreamCreate(&stream));
initialised_ = true;
#endif // XGBOOST_USE_NCCL
}
AllReducer::~AllReducer() {
#ifdef XGBOOST_USE_NCCL
if (initialised_) {
dh::safe_cuda(cudaStreamDestroy(stream));
ncclCommDestroy(comm);
}
if (xgboost::ConsoleLogger::ShouldLog(xgboost::ConsoleLogger::LV::kDebug)) {
LOG(CONSOLE) << "======== NCCL Statistics========";
LOG(CONSOLE) << "AllReduce calls: " << allreduce_calls_;
LOG(CONSOLE) << "AllReduce total MiB communicated: " << allreduce_bytes_/1048576;
}
#endif // XGBOOST_USE_NCCL
}
} // namespace dh
|
1cbf51857bd394dcbbcbdc3112bfd255aac62258.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/core/Array.h>
using namespace at::native;
using namespace at::native::memory;
constexpr int buffer_size = 1024;
__managed__ double4 buffer1[buffer_size];
__managed__ double4 buffer2[buffer_size];
void reset_buffers() {
for (int i = 0; i < buffer_size; i++) {
buffer1[i].x = i;
buffer1[i].y = i + 0.1;
buffer1[i].z = i + 0.2;
buffer1[i].w = i + 0.3;
    buffer2[i].x = -i;
    buffer2[i].y = -(i + 0.1);
    buffer2[i].z = -(i + 0.2);
    buffer2[i].w = -(i + 0.3);
}
}
#if defined(USE_ROCM)
TEST(TestLoops, HasSameArgTypes) {
// This is a compile-time unit test. If this file compiles without error,
// then the test passes and during runtime, we just need to return.
using namespace at::native::modern::detail;
using func1_t = int (*)(float, float);
using func2_t = int (*)(bool, float, float);
using func3_t = int (*)(float);
using func4_t = int (*)();
static_assert(has_same_arg_types<func1_t>::value, "func1_t has the same argument types");
static_assert(!has_same_arg_types<func2_t>::value, "func2_t does not have the same argument types");
static_assert(has_same_arg_types<func3_t>::value, "func3_t has the same argument types");
static_assert(has_same_arg_types<func4_t>::value, "func4_t has the same argument types");
return;
}
#endif
TEST(TestVectorizedMemoryAccess, CanVectorizeUpTo) {
char *ptr = reinterpret_cast<char *>(buffer1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int64_t>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 1), 1);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 1), 1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 2), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 2), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr + 2), 1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 4), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 4), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr + 4), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int>(ptr + 4), 1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 8), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 8), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr + 8), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int>(ptr + 8), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int64_t>(ptr + 8), 1);
}
// The following kernel copies values using the vectorized policies
// defined in `ATen/native/cuda/MemoryAccess.cuh`
template <typename scalar_t, int vec_size>
__global__ void vectorized_copy(scalar_t *dst, scalar_t *src) {
static_assert(vec_size <= thread_work_size() && thread_work_size() % vec_size == 0, "Invalid vec_size");
using array_t = at::detail::Array<char*, 2>;
array_t data;
data[0] = reinterpret_cast<char *>(dst);
data[1] = reinterpret_cast<char *>(src);
int idx = blockIdx.x;
using vectorized = policies::vectorized<vec_size, array_t>;
auto policy = vectorized(data);
scalar_t buf[thread_work_size()];
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11000
// This fails only on CUDA 10.x, remove this after CUDA 10.x support is dropped
scalar_t *buf_ = &buf[0];
auto accessor = [&](int index) -> scalar_t & { return buf_[index]; };
#else
auto accessor = [&](int index) -> scalar_t & { return buf[index]; };
#endif
policy.load_single_arg(accessor, src + block_work_size() * blockIdx.x);
policy.store(buf, idx);
}
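// The vectorized policy above issues vec_size-wide vector loads and stores,
// so the pointers handed to the kernel are expected to be suitably aligned
// for that vector width; the CanVectorizeUpTo checks earlier and the
// currently disabled unaligned-pointer test at the bottom of this file probe
// exactly that requirement.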
TEST(TestVectorizedMemoryAccess, CopyKernel) {
if (!at::cuda::is_available()) {
return;
}
double *b1 = reinterpret_cast<double *>(buffer1);
double *b2 = reinterpret_cast<double *>(buffer2);
// vec4 copy
reset_buffers();
hipDeviceSynchronize();
constexpr int total_work_size = buffer_size * 4;
hipLaunchKernelGGL(( vectorized_copy<double, 4>), dim3(total_work_size / block_work_size()) , dim3(num_threads()), 0, 0, b2, b1);
C10_HIP_KERNEL_LAUNCH_CHECK();
ASSERT_EQ(hipSuccess, hipDeviceSynchronize());
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec2 copy
reset_buffers();
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorized_copy<double, 2>), dim3(total_work_size / block_work_size()) , dim3(num_threads()), 0, 0, b2, b1);
C10_HIP_KERNEL_LAUNCH_CHECK();
ASSERT_EQ(hipSuccess, hipDeviceSynchronize());
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec1 copy
reset_buffers();
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorized_copy<double, 1>), dim3(total_work_size / block_work_size()) , dim3(num_threads()), 0, 0, b2, b1);
C10_HIP_KERNEL_LAUNCH_CHECK();
ASSERT_EQ(hipSuccess, hipDeviceSynchronize());
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// Skipping this part until https://github.com/pytorch/pytorch/issues/51863 is resolved
#if 0
// unaligned
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
b1 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer1) + i);
b2 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer2) + j);
hipGetLastError();
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorized_copy<double, 4>), dim3(1), dim3(num_threads()), 0, 0, b2, b1);
hipDeviceSynchronize();
auto err = hipGetLastError();
if (i % 16 == 0 && j % 16 == 0) {
ASSERT_EQ(err, hipSuccess);
} else {
ASSERT_EQ(err, hipErrorMisalignedAddress);
}
}
}
#endif
}
| 1cbf51857bd394dcbbcbdc3112bfd255aac62258.cu | #include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/core/Array.h>
using namespace at::native;
using namespace at::native::memory;
constexpr int buffer_size = 1024;
__managed__ double4 buffer1[buffer_size];
__managed__ double4 buffer2[buffer_size];
void reset_buffers() {
for (int i = 0; i < buffer_size; i++) {
buffer1[i].x = i;
buffer1[i].y = i + 0.1;
buffer1[i].z = i + 0.2;
buffer1[i].w = i + 0.3;
    buffer2[i].x = -i;
    buffer2[i].y = -(i + 0.1);
    buffer2[i].z = -(i + 0.2);
    buffer2[i].w = -(i + 0.3);
}
}
#if defined(USE_ROCM)
TEST(TestLoops, HasSameArgTypes) {
// This is a compile-time unit test. If this file compiles without error,
// then the test passes and during runtime, we just need to return.
using namespace at::native::modern::detail;
using func1_t = int (*)(float, float);
using func2_t = int (*)(bool, float, float);
using func3_t = int (*)(float);
using func4_t = int (*)();
static_assert(has_same_arg_types<func1_t>::value, "func1_t has the same argument types");
static_assert(!has_same_arg_types<func2_t>::value, "func2_t does not have the same argument types");
static_assert(has_same_arg_types<func3_t>::value, "func3_t has the same argument types");
static_assert(has_same_arg_types<func4_t>::value, "func4_t has the same argument types");
return;
}
#endif
TEST(TestVectorizedMemoryAccess, CanVectorizeUpTo) {
char *ptr = reinterpret_cast<char *>(buffer1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int64_t>(ptr), 4);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 1), 1);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 1), 1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 2), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 2), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr + 2), 1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 4), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 4), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr + 4), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int>(ptr + 4), 1);
ASSERT_EQ(memory::can_vectorize_up_to<bool>(ptr + 8), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int8_t>(ptr + 8), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int16_t>(ptr + 8), 4);
ASSERT_EQ(memory::can_vectorize_up_to<int>(ptr + 8), 2);
ASSERT_EQ(memory::can_vectorize_up_to<int64_t>(ptr + 8), 1);
}
// The following kernel copy values by using vectorized policies
// defined in `ATen/native/cuda/MemoryAccess.cuh`
template <typename scalar_t, int vec_size>
__global__ void vectorized_copy(scalar_t *dst, scalar_t *src) {
static_assert(vec_size <= thread_work_size() && thread_work_size() % vec_size == 0, "Invalid vec_size");
using array_t = at::detail::Array<char*, 2>;
array_t data;
data[0] = reinterpret_cast<char *>(dst);
data[1] = reinterpret_cast<char *>(src);
int idx = blockIdx.x;
using vectorized = policies::vectorized<vec_size, array_t>;
auto policy = vectorized(data);
scalar_t buf[thread_work_size()];
#if defined(CUDA_VERSION) && CUDA_VERSION < 11000
// This fails only on CUDA 10.x, remove this after CUDA 10.x support is dropped
scalar_t *buf_ = &buf[0];
auto accessor = [&](int index) -> scalar_t & { return buf_[index]; };
#else
auto accessor = [&](int index) -> scalar_t & { return buf[index]; };
#endif
policy.load_single_arg(accessor, src + block_work_size() * blockIdx.x);
policy.store(buf, idx);
}
TEST(TestVectorizedMemoryAccess, CopyKernel) {
if (!at::cuda::is_available()) {
return;
}
double *b1 = reinterpret_cast<double *>(buffer1);
double *b2 = reinterpret_cast<double *>(buffer2);
// vec4 copy
reset_buffers();
cudaDeviceSynchronize();
constexpr int total_work_size = buffer_size * 4;
vectorized_copy<double, 4><<<total_work_size / block_work_size() , num_threads()>>>(b2, b1);
C10_CUDA_KERNEL_LAUNCH_CHECK();
ASSERT_EQ(cudaSuccess, cudaDeviceSynchronize());
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec2 copy
reset_buffers();
cudaDeviceSynchronize();
vectorized_copy<double, 2><<<total_work_size / block_work_size() , num_threads()>>>(b2, b1);
C10_CUDA_KERNEL_LAUNCH_CHECK();
ASSERT_EQ(cudaSuccess, cudaDeviceSynchronize());
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec1 copy
reset_buffers();
cudaDeviceSynchronize();
vectorized_copy<double, 1><<<total_work_size / block_work_size() , num_threads()>>>(b2, b1);
C10_CUDA_KERNEL_LAUNCH_CHECK();
ASSERT_EQ(cudaSuccess, cudaDeviceSynchronize());
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// Skipping this part until https://github.com/pytorch/pytorch/issues/51863 is resolved
#if 0
// unaligned
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
b1 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer1) + i);
b2 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer2) + j);
cudaGetLastError();
cudaDeviceSynchronize();
vectorized_copy<double, 4><<<1, num_threads()>>>(b2, b1);
cudaDeviceSynchronize();
auto err = cudaGetLastError();
if (i % 16 == 0 && j % 16 == 0) {
ASSERT_EQ(err, cudaSuccess);
} else {
ASSERT_EQ(err, cudaErrorMisalignedAddress);
}
}
}
#endif
}
|
755bebd20676cade57204cf6c37f20a7b9e405c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color is
//specified by how much Red, Green and Blue is in it. The 'A' stands for Alpha
//and is used for transparency, it will be ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte. Since we
//are using one byte for each color there are 256 different possible values for
//each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel which
//is one byte in size.
//To convert an image from color to grayscale one simple method is to set the
//intensity to the average of the RGB channels. But we will use a more
//sophisticated method that takes into account how the eye perceives color and
//weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue. The
//NTSC (National Television System Committee) recommends the following formula
//for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are single
//precision floating point constants and not double precision constants.
//You should fill in the kernel as well as set the block and grid sizes so that
//the entire image is processed.
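//Worked example (illustrative, not part of the original assignment): a pixel with
//R = 100, G = 150, B = 200 gives
//I = .299f*100 + .587f*150 + .114f*200 = 29.9 + 88.05 + 22.8 = 140.75,
//which becomes 140 once truncated into the unsigned char output channel.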
#include "reference_calc.cpp"
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset.
//
//NOTE: Be careful not to try to access memory that is outside the bounds of
//the image. You'll want code that performs the following check before accessing
//GPU memory:
//
int x=blockIdx.x;
int y=threadIdx.y;
if ( x >= numRows || y >= numCols )
{
return;
}
uchar4 rgba = rgbaImage[x * numCols + y];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[x * numCols + y] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage,
uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage,
size_t numRows,
size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, numCols, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
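//Note: with blockSize(1, numCols, 1) each block holds one thread per column and
//gridSize(numRows, 1, 1) launches one block per row, so the kernel above reads rows
//from blockIdx.x and columns from threadIdx.y; this only works while numCols stays
//within the per-block thread limit (typically 1024).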
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage,
d_greyImage,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
} | 755bebd20676cade57204cf6c37f20a7b9e405c9.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color is
//specified by how much Red, Grean and Blue is in it. The 'A' stands for Alpha
//and is used for transparency, it will be ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte. Since we
//are using one byte for each color there are 256 different possible values for
//each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel which
//is one byte in size.
//To convert an image from color to grayscale one simple method is to set the
//intensity to the average of the RGB channels. But we will use a more
//sophisticated method that takes into account how the eye perceives color and
//weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue. The
//NTSC (National Television System Committee) recommends the following formula
//for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are single
//precision floating point constants and not double precision constants.
//You should fill in the kernel as well as set the block and grid sizes so that
//the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset.
//
//NOTE: Be careful not to try to access memory that is outside the bounds of
//the image. You'll want code that performs the following check before accessing
//GPU memory:
//
int x=blockIdx.x;
int y=threadIdx.y;
if ( x >= numRows || y >= numCols )
{
return;
}
uchar4 rgba = rgbaImage[x * numCols + y];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[x * numCols + y] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage,
uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage,
size_t numRows,
size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, numCols, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage,
d_greyImage,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
} |
867bcaa3121fefdfac9336d99343838d7ea38b62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "delta_infer/custom_ops/platform/CUDA/kernels.h"
namespace tensorflow {
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__ T warpReduceSum(T val) {
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceSum(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
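/* Illustration (not in the original source): __shfl_xor_sync folds the warp in a
butterfly -- with mask 16 each lane adds the value held by lane^16, then 8, 4, 2, 1 --
so after five steps every lane holds the full 32-lane sum. blockReduceSum stores one
such partial per warp in shared[32] (hence the implicit blockDim.x <= 1024 assumption)
and lets the first warp reduce those partials a second time. */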
template <typename T>
__inline__ __device__ T warpReduceMax(T val) {
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get maxx in each warp
if(lane == 0) // record in-warp maxx by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e20f;
val = warpReduceMax(val);
return val;
}
template<typename T>
__global__ void transpose(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len))/ seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
__global__
void softmax_kernel(T* qk_buf, const int* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scaler)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-30f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
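/* Note on the masking above: attr_mask carries 1 for positions that may be attended
and 0 for padded ones, so (1.0f - mask_val) * -10000.0f leaves valid logits untouched
and pushes masked logits down by 1e4; after subtracting the row max and applying
__expf they contribute essentially zero probability. softmax_kernel_v2 below uses the
same trick, with one row per block instead of one (batch, head) pair per block. */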
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf, const int* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scaler)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-30f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
template<typename T>
__global__ void add_QKV_bias(T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf, T* k_buf, T* v_buf,
const int batch_size, const int seq_len, const int head_num, const int size_per_head,
const int word_per_block) {
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if(qkv_id == 0) {
data_ptr = Q + row_offset;
buf_ptr = q_buf;
bias_ptr = bias_Q;
} else if(qkv_id == 1) {
data_ptr = K + row_offset;
buf_ptr = k_buf;
bias_ptr = bias_K;
} else {
data_ptr = V + row_offset;
buf_ptr = v_buf;
bias_ptr = bias_V;
}
#if 1
// add bias and transpose
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x]);
#pragma unroll
for(int i = word_start_id; i < word_start_id + word_per_block; ++i) {
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
#else
// error
T bias = __ldg(&bias_ptr[threadIdx.x]);
for(int i = 0; i < word_per_block; i++) {
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = row_offset + i*n + threadIdx.x;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
#endif
}
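/* Note: with word_per_block == 1 the launcher issues 3*m blocks (m = batch*seq);
qkv_id = blockIdx.x / m picks whether the block copies from Q, K or V, and the write
above adds the bias while re-laying each row out from [batch, seq, head_num,
size_per_head] to [batch, head_num, seq, size_per_head], the layout the strided-batched
GEMMs in MultiHeadAtentionLauncher below assume. */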
template<typename T>
void MultiHeadAtentionLauncher(DType<T>* query,
DType<T>* key,
DType<T>* value,
DType<T>* q_buf,
DType<T>* k_buf,
DType<T>* v_buf,
DType<T>* qk_buf,
DType<T>* transpose_dst,
DType<T>* attr_out_buf,
const DType<T> scaler,
int batch_size,
int from_seq_len,
int head_num,
int size_per_head,
const DType<T>* bias_Q,
const DType<T>* bias_K,
const DType<T>* bias_V,
const DType<int>* mask,
hipblasHandle_t cublas_handle,
hipStream_t stream) {
int m = batch_size * from_seq_len;
int k = head_num * size_per_head;
const int word_per_block = 1;
assert(k <= 1024);
assert(m / word_per_block * 3 <= 65536);
dim3 grid(m / word_per_block * 3);
dim3 block(k);
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(query);
printf("didi query ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(key);
printf("didi key ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(value);
printf("didi value ck sum: %lf \n", sum);
};
hipLaunchKernelGGL(( add_QKV_bias<DType<T> >), dim3(grid), dim3(block), 0, stream, query, bias_Q,
key, bias_K,
value, bias_V,
q_buf, k_buf, v_buf,
batch_size, from_seq_len, head_num, size_per_head,
word_per_block);
//cuda(PeekAtLastError());
//cuda(DeviceSynchronize());
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(q_buf);
printf("didi q_buf ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(k_buf);
printf("didi k_buf ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(v_buf);
printf("didi v_buf ck sum: %lf \n", sum);
};
#if 1
T alpha = 1.0f, beta = 0.0f;
typedef DeltaTraits<GPUDevice, T> traits;
/// get _qk_buf[batch_size, head_num, from_seq_len, from_seq_len]
#if (CUDART_VERSION >= 10000)
cublas(GemmStridedBatchedEx(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
from_seq_len, from_seq_len, size_per_head,
&alpha,
k_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
q_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
&beta,
qk_buf, traits::ComputeType, from_seq_len, from_seq_len * from_seq_len,
batch_size * head_num,
traits::ComputeType,
*static_cast<delta::CublasGemmAlgo*>(delta::Config::Instance()["3"].get())));
#else
cublas(SgemmStridedBatched(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
from_seq_len, from_seq_len, size_per_head,
&alpha,
k_buf, size_per_head, from_seq_len * size_per_head,
q_buf, size_per_head, from_seq_len * size_per_head,
&beta,
qk_buf, from_seq_len, from_seq_len * from_seq_len,
batch_size * head_num));
#endif
if(from_seq_len <= 32)
block.x = 32;
else if(from_seq_len > 32 && from_seq_len <= 64)
block.x = 64;
else if(from_seq_len > 64 && from_seq_len <= 128)
block.x = 128;
else if(from_seq_len > 128 && from_seq_len <= 256)
block.x = 256;
else if(from_seq_len > 256 && from_seq_len <= 512)
block.x = 512;
else
block.x = 1024;
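// Note: the chain above just rounds from_seq_len up to the next block size the
// reduction helpers support (32..1024), so one thread covers one key position and the
// `threadIdx.x < seq_len` guards inside the softmax kernels neutralize the surplus
// threads. Sequences longer than 1024 would overflow this scheme.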
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(qk_buf);
printf("didi qk_buf before softmax ck sum: %lf\n", sum);
};
if(batch_size * head_num <= 120) {
grid.x = batch_size * head_num * from_seq_len;
hipLaunchKernelGGL(( softmax_kernel_v2<DType<T> >), dim3(grid), dim3(block), 0, stream, qk_buf, mask, batch_size, head_num, from_seq_len, scaler);
} else {
grid.x = batch_size * head_num;
hipLaunchKernelGGL(( softmax_kernel<DType<T> >), dim3(grid), dim3(block), 0, stream, qk_buf, mask, batch_size, head_num, from_seq_len, scaler);
}
DELTA_SCOPE {
auto sum = CheckSum<GPUDevice, float>(qk_buf);
printf("didi qk_buf after softmax ck sum: %lf\n", sum);
};
#if (CUDART_VERSION >= 10000) /// cuda > 10.0
cublas(GemmStridedBatchedEx(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
size_per_head, from_seq_len, from_seq_len,
&alpha,
v_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
qk_buf, traits::ComputeType, from_seq_len, from_seq_len * from_seq_len,
&beta,
transpose_dst, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
batch_size * head_num,
traits::ComputeType,
*static_cast<delta::CublasGemmAlgo*>(delta::Config::Instance()["4"].get())));
#else
cublas(SgemmStridedBatched(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
size_per_head, from_seq_len, from_seq_len,
&alpha,
v_buf, size_per_head, from_seq_len * size_per_head,
qk_buf, from_seq_len, from_seq_len * from_seq_len,
&beta,
transpose_dst, size_per_head, from_seq_len * size_per_head,
batch_size * head_num));
#endif
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(transpose_dst);
printf("didi transpose_dst before softmax ck sum: %lf\n", sum);
};
const int seq_per_block = 1;
grid.x = batch_size * head_num * from_seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
hipLaunchKernelGGL(( transpose<DType<T> >), dim3(grid), dim3(block), 0, stream, transpose_dst,
attr_out_buf,
batch_size,
from_seq_len,
head_num,
size_per_head);
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(transpose_dst);
printf("didi transpose_dst after softmax ck sum: %lf\n", sum);
};
#endif
}
template void MultiHeadAtentionLauncher<float>(DType<float>* query,
DType<float>* key,
DType<float>* value,
DType<float>* q_buf,
DType<float>* k_buf,
DType<float>* v_buf,
DType<float>* qk_buf,
DType<float>* transpose_dst,
DType<float>* attr_out_buf,
const DType<float> scaler,
int batch_size,
int from_seq_len,
int head_num,
int size_per_head,
const DType<float>* bias_Q,
const DType<float>* bias_K,
const DType<float>* bias_V,
const DType<int>* mask,
hipblasHandle_t cublas_handle,
hipStream_t stream);
template<typename T>
__global__ void transpose_head_and_seq_kernel(T* data, int sqlen, int n_head, int d_head, T* out) {
extern __shared__ T smem[];
int seq_id = blockIdx.x / n_head;
int head_id = blockIdx.x % n_head;
smem[threadIdx.x] = data[blockIdx.x*d_head + threadIdx.x];
__syncthreads();
out[head_id * sqlen * d_head + seq_id * d_head + threadIdx.x] = smem[threadIdx.x];
}
template<typename T>
void transpose_head_and_seq(DType<T>* data, int sqlen, int n_head, int d_head, DType<T>* out, hipStream_t stream) {
dim3 grid(sqlen * n_head);
dim3 block(d_head);
hipLaunchKernelGGL(( transpose_head_and_seq_kernel), dim3(grid), dim3(block), d_head*sizeof(T), stream, data, sqlen, n_head, d_head, out);
}
template void transpose_head_and_seq<float>(DType<float>* data, int sqlen, int n_head, int d_head, DType<float>* out, hipStream_t stream);
template<typename T>
__global__ void transpose_head_num_and_seq_kernel(T* attn_vec,
int sqlen, int bsz, int n_head, int d_head,
T* attn_vec_out) {
extern __shared__ T smem[];
int head_offset = blockIdx.x * d_head;
int bsz_id = blockIdx.x / (n_head * sqlen);
int head_id = blockIdx.x % (n_head * sqlen) / sqlen;
int seq_id = blockIdx.x % sqlen;
smem[threadIdx.x] = attn_vec[head_offset + threadIdx.x];
__syncthreads();
int head_len = n_head * d_head;
int batch_len = head_len * bsz;
attn_vec_out[seq_id * batch_len + bsz_id * head_len + head_id * d_head + threadIdx.x] = smem[threadIdx.x];
}
template<typename T>
void transpose_head_num_and_seq(DType<T>* attn_vec,
int sqlen, int bsz, int n_head, int d_head,
DType<T>* attn_vec_out,
hipStream_t stream) {
dim3 grid(sqlen * bsz * n_head);
dim3 block(d_head);
hipLaunchKernelGGL(( transpose_head_num_and_seq_kernel), dim3(grid), dim3(block), d_head*sizeof(T), stream, attn_vec, sqlen, bsz, n_head, d_head, attn_vec_out);
}
template void transpose_head_num_and_seq<float>(DType<float>* attn_vec,
int sqlen, int bsz, int n_head, int d_head,
DType<float>* attn_vec_out,
hipStream_t stream);
template<typename T>
__global__ void add_bias_and_split_kernel(T* w_heads,
int mlen, int sqlen, int bsz, int n, int n_head, int d_head,
const T* r_w_bias,
const T* r_r_bias,
T* rw_head_q,
T* rr_head_q,
T* w_head_k,
T* w_head_v) {
extern __shared__ T swap_space[];
int start_idx = 0;
if(blockIdx.y == 0) {
// split and add bias
int block_id = blockIdx.x - mlen * bsz;
if(block_id >= 0) {
start_idx = blockIdx.x * n * 3;
for(int i=threadIdx.x; i<n; i+=blockDim.x) {
swap_space[i] = w_heads[start_idx + i];
}
__syncthreads();
int seq_id = block_id / bsz;
int bsz_id = block_id % bsz;
for(int i = 0; i < n_head; i++) {
for(int j=threadIdx.x; j<d_head; j+=blockDim.x) {
T bias = __ldg(&r_w_bias[i*d_head + j]);
// from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head]
int out_id = bsz_id * n_head * sqlen * d_head + i * sqlen * d_head + seq_id * d_head + j;
rw_head_q[out_id] = swap_space[i*d_head + j] + bias;
bias = __ldg(&r_r_bias[i*d_head + j]);
rr_head_q[out_id] = swap_space[i*d_head + j] + bias;
}
}
}
} else if(blockIdx.y == 1) {
// only split
start_idx = blockIdx.x * n * 3 + n;
for(int i=threadIdx.x; i<n; i+=blockDim.x) {
swap_space[i] = w_heads[start_idx + i];
}
__syncthreads();
int seq_id = blockIdx.x / bsz;
int bsz_id = blockIdx.x % bsz;
for(int i = 0; i < n_head; i++) {
for(int j=threadIdx.x; j < d_head; j+=blockDim.x) {
// from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head]
int out_id = bsz_id * n_head * (mlen + sqlen) * d_head + i * (mlen + sqlen) * d_head + seq_id * d_head + j;
w_head_k[out_id] = swap_space[i*d_head + j];
}
}
} else {
// only split
start_idx = blockIdx.x * n * 3 + 2 * n;
for(int i=threadIdx.x; i<n; i+=blockDim.x) {
swap_space[i] = w_heads[start_idx + i];
}
__syncthreads();
int seq_id = blockIdx.x / bsz;
int bsz_id = blockIdx.x % bsz;
for(int i = 0; i < n_head; i++) {
for(int j=threadIdx.x; j < d_head; j+=blockDim.x) {
// from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head]
int out_id = bsz_id * n_head * (mlen + sqlen) * d_head + i * (mlen + sqlen) * d_head + seq_id * d_head + j;
w_head_v[out_id] = swap_space[i*d_head + j];
}
}
}
}
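/* Note: blockIdx.y in {0, 1, 2} selects the Q, K or V third of the fused
[.., 3*n_head*d_head] projection held in w_heads. The Q slice is emitted only for the
current segment (block_id = blockIdx.x - mlen*bsz >= 0), once with r_w_bias and once
with r_r_bias added (rw_head_q / rr_head_q), while K and V keep all mlen+sqlen
positions; every output is also reshaped from [seq, bsz, n_head, d_head] to
[bsz, n_head, seq, d_head], as the in-kernel comments state. */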
template <typename T>
void add_bias_and_split(DType<T>* w_heads,
int mlen, int sqlen, int bsz, int n_head, int d_head,
const DType<T>* r_w_bias,
const DType<T>* r_r_bias,
DType<T>* rw_head_q,
DType<T>* rr_head_q,
DType<T>* w_head_k,
DType<T>* w_head_v,
hipStream_t stream) {
int m = (mlen + sqlen) * bsz;
int n = n_head * d_head; // x 3
assert(n <= 1024);
assert(m * 3 <= 65536);
dim3 grid(m, 3);
dim3 block(128);
hipLaunchKernelGGL(( add_bias_and_split_kernel), dim3(grid), dim3(block), n*sizeof(T), stream, w_heads,
mlen, sqlen, bsz, n, n_head, d_head,
r_w_bias,
r_r_bias,
rw_head_q,
rr_head_q,
w_head_k,
w_head_v);
};
template void add_bias_and_split<float>(DType<float>* w_heads,
int mlen, int sqlen, int bsz, int n_head, int d_head,
const DType<float>* r_w_bias,
const DType<float>* r_r_bias,
DType<float>* rw_head_q,
DType<float>* rr_head_q,
DType<float>* w_head_k,
DType<float>* w_head_v,
hipStream_t stream);
#if 0
template<typename T>
__global__ void attn_prob_softmax_kernel(T* ac,
T* bd,
const T* attn_mask,
const int sqlen,
const int klen,
const float scaler) {
int input_offset = blockIdx.x * klen;
int mask_offset = blockIdx.x % sqlen * klen;
__shared__ float s_sum;
float ac_val = threadIdx.x < klen ? (float)ac[threadIdx.x + input_offset] : 0.0f;
float bd_val = threadIdx.x < klen ? (float)bd[threadIdx.x + input_offset] : 0.0f;
float mask_val = threadIdx.x < klen ? (float)attn_mask[threadIdx.x + mask_offset] : 0.0f;
float tmp = threadIdx.x < klen ? (ac_val + bd_val) * scaler * (1 - mask_val) - 1e30f * mask_val : 1e-30f;
tmp = threadIdx.x < klen ? __expf((float)(tmp)) : 0.0f;
float sum_val = blockReduceSum<float>(tmp);
if(threadIdx.x == 0) {
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < klen) {
ac[threadIdx.x + input_offset] = (T)(tmp / s_sum);
}
}
#else
template<typename T>
__global__ void attn_prob_softmax_kernel(T* ac,
T* bd,
const T* attn_mask,
const int sqlen,
const int klen,
const float scaler) {
int input_offset = blockIdx.x * klen;
int mask_offset = blockIdx.x % sqlen * klen;
__shared__ float s_sum, s_max;
float ac_val = threadIdx.x < klen ? (float)ac[threadIdx.x + input_offset] : 0.0f;
float bd_val = threadIdx.x < klen ? (float)bd[threadIdx.x + input_offset] : 0.0f;
float mask_val = threadIdx.x < klen ? (float)attn_mask[threadIdx.x + mask_offset] : 0.0f;
float tmp = threadIdx.x < klen ? (ac_val + bd_val) * scaler * (1 - mask_val) - 1e20f * mask_val : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float qk_tmp = threadIdx.x < klen ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0) {
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < klen) {
ac[threadIdx.x + input_offset] = (T)(qk_tmp / s_sum);
}
}
#endif
template <typename T>
void attn_prob_softmax(DType<T>* ac,
DType<T>* bd,
const DType<T>* attn_mask,
int mlen, int sqlen, int bsz, int n_head, int d_head,
hipStream_t stream) {
float scaler = 1 / sqrt(d_head);
dim3 grid;
grid.x = bsz * n_head * sqlen;
dim3 block;
int klen = (sqlen + mlen);
if(klen <= 32) {
block.x = 32;
} else if(klen > 32 && klen <= 64) {
block.x = 64;
} else if(klen > 64 && klen <= 128) {
block.x = 128;
} else if(klen > 128 && klen <= 256) {
block.x = 256;
} else if(klen > 256 && klen <= 512) {
block.x = 512;
} else {
block.x = 1024;
}
hipLaunchKernelGGL(( attn_prob_softmax_kernel), dim3(grid), dim3(block), 0, stream, ac, bd, attn_mask, sqlen, klen, scaler);
}
template void attn_prob_softmax<float>(DType<float>* ac,
DType<float>* bd,
const DType<float>* attn_mask,
int mlen, int sqlen, int bsz, int n_head, int d_head,
hipStream_t stream);
} /* namespace tensorflow */
| 867bcaa3121fefdfac9336d99343838d7ea38b62.cu | #include "delta_infer/custom_ops/platform/CUDA/kernels.h"
namespace tensorflow {
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__ T warpReduceSum(T val) {
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceSum(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__ T warpReduceMax(T val) {
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get maxx in each warp
if(lane == 0) // record in-warp maxx by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e20f;
val = warpReduceMax(val);
return val;
}
template<typename T>
__global__ void transpose(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len))/ seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
__global__
void softmax_kernel(T* qk_buf, const int* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scaler)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-30f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf, const int* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scaler)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-30f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
template<typename T>
__global__ void add_QKV_bias(T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf, T* k_buf, T* v_buf,
const int batch_size, const int seq_len, const int head_num, const int size_per_head,
const int word_per_block) {
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if(qkv_id == 0) {
data_ptr = Q + row_offset;
buf_ptr = q_buf;
bias_ptr = bias_Q;
} else if(qkv_id == 1) {
data_ptr = K + row_offset;
buf_ptr = k_buf;
bias_ptr = bias_K;
} else {
data_ptr = V + row_offset;
buf_ptr = v_buf;
bias_ptr = bias_V;
}
#if 1
// add bias and transpose
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x]);
#pragma unroll
for(int i = word_start_id; i < word_start_id + word_per_block; ++i) {
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
#else
// error
T bias = __ldg(&bias_ptr[threadIdx.x]);
for(int i = 0; i < word_per_block; i++) {
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = row_offset + i*n + threadIdx.x;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
#endif
}
template<typename T>
void MultiHeadAtentionLauncher(DType<T>* query,
DType<T>* key,
DType<T>* value,
DType<T>* q_buf,
DType<T>* k_buf,
DType<T>* v_buf,
DType<T>* qk_buf,
DType<T>* transpose_dst,
DType<T>* attr_out_buf,
const DType<T> scaler,
int batch_size,
int from_seq_len,
int head_num,
int size_per_head,
const DType<T>* bias_Q,
const DType<T>* bias_K,
const DType<T>* bias_V,
const DType<int>* mask,
cublasHandle_t cublas_handle,
cudaStream_t stream) {
int m = batch_size * from_seq_len;
int k = head_num * size_per_head;
const int word_per_block = 1;
assert(k <= 1024);
assert(m / word_per_block * 3 <= 65536);
dim3 grid(m / word_per_block * 3);
dim3 block(k);
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(query);
printf("didi query ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(key);
printf("didi key ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(value);
printf("didi value ck sum: %lf \n", sum);
};
add_QKV_bias<DType<T> ><<<grid, block, 0, stream>>>(query, bias_Q,
key, bias_K,
value, bias_V,
q_buf, k_buf, v_buf,
batch_size, from_seq_len, head_num, size_per_head,
word_per_block);
//cuda(PeekAtLastError());
//cuda(DeviceSynchronize());
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(q_buf);
printf("didi q_buf ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(k_buf);
printf("didi k_buf ck sum: %lf \n", sum);
sum = CheckSum<GPUDevice, float>(v_buf);
printf("didi v_buf ck sum: %lf \n", sum);
};
#if 1
T alpha = 1.0f, beta = 0.0f;
typedef DeltaTraits<GPUDevice, T> traits;
/// get _qk_buf[batch_size, head_num, from_seq_len, from_seq_len]
#if (CUDART_VERSION >= 10000)
cublas(GemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
from_seq_len, from_seq_len, size_per_head,
&alpha,
k_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
q_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
&beta,
qk_buf, traits::ComputeType, from_seq_len, from_seq_len * from_seq_len,
batch_size * head_num,
traits::ComputeType,
*static_cast<delta::CublasGemmAlgo*>(delta::Config::Instance()["3"].get())));
#else
cublas(SgemmStridedBatched(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
from_seq_len, from_seq_len, size_per_head,
&alpha,
k_buf, size_per_head, from_seq_len * size_per_head,
q_buf, size_per_head, from_seq_len * size_per_head,
&beta,
qk_buf, from_seq_len, from_seq_len * from_seq_len,
batch_size * head_num));
#endif
if(from_seq_len <= 32)
block.x = 32;
else if(from_seq_len > 32 && from_seq_len <= 64)
block.x = 64;
else if(from_seq_len > 64 && from_seq_len <= 128)
block.x = 128;
else if(from_seq_len > 128 && from_seq_len <= 256)
block.x = 256;
else if(from_seq_len > 256 && from_seq_len <= 512)
block.x = 512;
else
block.x = 1024;
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(qk_buf);
printf("didi qk_buf before softmax ck sum: %lf\n", sum);
};
if(batch_size * head_num <= 120) {
grid.x = batch_size * head_num * from_seq_len;
softmax_kernel_v2<DType<T> ><<<grid, block, 0, stream>>>(qk_buf, mask, batch_size, head_num, from_seq_len, scaler);
} else {
grid.x = batch_size * head_num;
softmax_kernel<DType<T> ><<<grid, block, 0, stream>>>(qk_buf, mask, batch_size, head_num, from_seq_len, scaler);
}
DELTA_SCOPE {
auto sum = CheckSum<GPUDevice, float>(qk_buf);
printf("didi qk_buf after softmax ck sum: %lf\n", sum);
};
#if (CUDART_VERSION >= 10000) /// cuda > 10.0
cublas(GemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
size_per_head, from_seq_len, from_seq_len,
&alpha,
v_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
qk_buf, traits::ComputeType, from_seq_len, from_seq_len * from_seq_len,
&beta,
transpose_dst, traits::ComputeType, size_per_head, from_seq_len * size_per_head,
batch_size * head_num,
traits::ComputeType,
*static_cast<delta::CublasGemmAlgo*>(delta::Config::Instance()["4"].get())));
#else
cublas(SgemmStridedBatched(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
size_per_head, from_seq_len, from_seq_len,
&alpha,
v_buf, size_per_head, from_seq_len * size_per_head,
qk_buf, from_seq_len, from_seq_len * from_seq_len,
&beta,
transpose_dst, size_per_head, from_seq_len * size_per_head,
batch_size * head_num));
#endif
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(transpose_dst);
printf("didi transpose_dst before softmax ck sum: %lf\n", sum);
};
const int seq_per_block = 1;
grid.x = batch_size * head_num * from_seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
transpose<DType<T> ><<<grid, block, 0, stream>>>(transpose_dst,
attr_out_buf,
batch_size,
from_seq_len,
head_num,
size_per_head);
DELTA_SCOPE{
auto sum = CheckSum<GPUDevice, float>(transpose_dst);
printf("didi transpose_dst after softmax ck sum: %lf\n", sum);
};
#endif
}
template void MultiHeadAtentionLauncher<float>(DType<float>* query,
DType<float>* key,
DType<float>* value,
DType<float>* q_buf,
DType<float>* k_buf,
DType<float>* v_buf,
DType<float>* qk_buf,
DType<float>* transpose_dst,
DType<float>* attr_out_buf,
const DType<float> scaler,
int batch_size,
int from_seq_len,
int head_num,
int size_per_head,
const DType<float>* bias_Q,
const DType<float>* bias_K,
const DType<float>* bias_V,
const DType<int>* mask,
cublasHandle_t cublas_handle,
cudaStream_t stream);
template<typename T>
__global__ void transpose_head_and_seq_kernel(T* data, int sqlen, int n_head, int d_head, T* out) {
extern __shared__ T smem[];
int seq_id = blockIdx.x / n_head;
int head_id = blockIdx.x % n_head;
smem[threadIdx.x] = data[blockIdx.x*d_head + threadIdx.x];
__syncthreads();
out[head_id * sqlen * d_head + seq_id * d_head + threadIdx.x] = smem[threadIdx.x];
}
template<typename T>
void transpose_head_and_seq(DType<T>* data, int sqlen, int n_head, int d_head, DType<T>* out, cudaStream_t stream) {
dim3 grid(sqlen * n_head);
dim3 block(d_head);
transpose_head_and_seq_kernel<<<grid, block, d_head*sizeof(T), stream>>>(data, sqlen, n_head, d_head, out);
}
template void transpose_head_and_seq<float>(DType<float>* data, int sqlen, int n_head, int d_head, DType<float>* out, cudaStream_t stream);
template<typename T>
__global__ void transpose_head_num_and_seq_kernel(T* attn_vec,
int sqlen, int bsz, int n_head, int d_head,
T* attn_vec_out) {
extern __shared__ T smem[];
int head_offset = blockIdx.x * d_head;
int bsz_id = blockIdx.x / (n_head * sqlen);
int head_id = blockIdx.x % (n_head * sqlen) / sqlen;
int seq_id = blockIdx.x % sqlen;
smem[threadIdx.x] = attn_vec[head_offset + threadIdx.x];
__syncthreads();
int head_len = n_head * d_head;
int batch_len = head_len * bsz;
attn_vec_out[seq_id * batch_len + bsz_id * head_len + head_id * d_head + threadIdx.x] = smem[threadIdx.x];
}
template<typename T>
void transpose_head_num_and_seq(DType<T>* attn_vec,
int sqlen, int bsz, int n_head, int d_head,
DType<T>* attn_vec_out,
cudaStream_t stream) {
dim3 grid(sqlen * bsz * n_head);
dim3 block(d_head);
transpose_head_num_and_seq_kernel<<<grid, block, d_head*sizeof(T), stream>>>(attn_vec, sqlen, bsz, n_head, d_head, attn_vec_out);
}
template void transpose_head_num_and_seq<float>(DType<float>* attn_vec,
int sqlen, int bsz, int n_head, int d_head,
DType<float>* attn_vec_out,
cudaStream_t stream);
template<typename T>
__global__ void add_bias_and_split_kernel(T* w_heads,
int mlen, int sqlen, int bsz, int n, int n_head, int d_head,
const T* r_w_bias,
const T* r_r_bias,
T* rw_head_q,
T* rr_head_q,
T* w_head_k,
T* w_head_v) {
extern __shared__ T swap_space[];
int start_idx = 0;
if(blockIdx.y == 0) {
// split and add bias
int block_id = blockIdx.x - mlen * bsz;
if(block_id >= 0) {
start_idx = blockIdx.x * n * 3;
for(int i=threadIdx.x; i<n; i+=blockDim.x) {
swap_space[i] = w_heads[start_idx + i];
}
__syncthreads();
int seq_id = block_id / bsz;
int bsz_id = block_id % bsz;
for(int i = 0; i < n_head; i++) {
for(int j=threadIdx.x; j<d_head; j+=blockDim.x) {
T bias = __ldg(&r_w_bias[i*d_head + j]);
// from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head]
int out_id = bsz_id * n_head * sqlen * d_head + i * sqlen * d_head + seq_id * d_head + j;
rw_head_q[out_id] = swap_space[i*d_head + j] + bias;
bias = __ldg(&r_r_bias[i*d_head + j]);
rr_head_q[out_id] = swap_space[i*d_head + j] + bias;
}
}
}
} else if(blockIdx.y == 1) {
// only split
start_idx = blockIdx.x * n * 3 + n;
for(int i=threadIdx.x; i<n; i+=blockDim.x) {
swap_space[i] = w_heads[start_idx + i];
}
__syncthreads();
int seq_id = blockIdx.x / bsz;
int bsz_id = blockIdx.x % bsz;
for(int i = 0; i < n_head; i++) {
for(int j=threadIdx.x; j < d_head; j+=blockDim.x) {
// from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head]
int out_id = bsz_id * n_head * (mlen + sqlen) * d_head + i * (mlen + sqlen) * d_head + seq_id * d_head + j;
w_head_k[out_id] = swap_space[i*d_head + j];
}
}
} else {
// only split
start_idx = blockIdx.x * n * 3 + 2 * n;
for(int i=threadIdx.x; i<n; i+=blockDim.x) {
swap_space[i] = w_heads[start_idx + i];
}
__syncthreads();
int seq_id = blockIdx.x / bsz;
int bsz_id = blockIdx.x % bsz;
for(int i = 0; i < n_head; i++) {
for(int j=threadIdx.x; j < d_head; j+=blockDim.x) {
// from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head]
int out_id = bsz_id * n_head * (mlen + sqlen) * d_head + i * (mlen + sqlen) * d_head + seq_id * d_head + j;
w_head_v[out_id] = swap_space[i*d_head + j];
}
}
}
}
template <typename T>
void add_bias_and_split(DType<T>* w_heads,
int mlen, int sqlen, int bsz, int n_head, int d_head,
const DType<T>* r_w_bias,
const DType<T>* r_r_bias,
DType<T>* rw_head_q,
DType<T>* rr_head_q,
DType<T>* w_head_k,
DType<T>* w_head_v,
cudaStream_t stream) {
int m = (mlen + sqlen) * bsz;
int n = n_head * d_head; // x 3
assert(n <= 1024);
assert(m * 3 <= 65536);
dim3 grid(m, 3);
dim3 block(128);
add_bias_and_split_kernel<<<grid, block, n*sizeof(T), stream>>>(w_heads,
mlen, sqlen, bsz, n, n_head, d_head,
r_w_bias,
r_r_bias,
rw_head_q,
rr_head_q,
w_head_k,
w_head_v);
};
template void add_bias_and_split<float>(DType<float>* w_heads,
int mlen, int sqlen, int bsz, int n_head, int d_head,
const DType<float>* r_w_bias,
const DType<float>* r_r_bias,
DType<float>* rw_head_q,
DType<float>* rr_head_q,
DType<float>* w_head_k,
DType<float>* w_head_v,
cudaStream_t stream);
#if 0
template<typename T>
__global__ void attn_prob_softmax_kernel(T* ac,
T* bd,
const T* attn_mask,
const int sqlen,
const int klen,
const float scaler) {
int input_offset = blockIdx.x * klen;
int mask_offset = blockIdx.x % sqlen * klen;
__shared__ float s_sum;
float ac_val = threadIdx.x < klen ? (float)ac[threadIdx.x + input_offset] : 0.0f;
float bd_val = threadIdx.x < klen ? (float)bd[threadIdx.x + input_offset] : 0.0f;
float mask_val = threadIdx.x < klen ? (float)attn_mask[threadIdx.x + mask_offset] : 0.0f;
float tmp = threadIdx.x < klen ? (ac_val + bd_val) * scaler * (1 - mask_val) - 1e30f * mask_val : 1e-30f;
tmp = threadIdx.x < klen ? __expf((float)(tmp)) : 0.0f;
float sum_val = blockReduceSum<float>(tmp);
if(threadIdx.x == 0) {
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < klen) {
ac[threadIdx.x + input_offset] = (T)(tmp / s_sum);
}
}
#else
template<typename T>
__global__ void attn_prob_softmax_kernel(T* ac,
T* bd,
const T* attn_mask,
const int sqlen,
const int klen,
const float scaler) {
int input_offset = blockIdx.x * klen;
int mask_offset = blockIdx.x % sqlen * klen;
__shared__ float s_sum, s_max;
float ac_val = threadIdx.x < klen ? (float)ac[threadIdx.x + input_offset] : 0.0f;
float bd_val = threadIdx.x < klen ? (float)bd[threadIdx.x + input_offset] : 0.0f;
float mask_val = threadIdx.x < klen ? (float)attn_mask[threadIdx.x + mask_offset] : 0.0f;
float tmp = threadIdx.x < klen ? (ac_val + bd_val) * scaler * (1 - mask_val) - 1e20f * mask_val : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float qk_tmp = threadIdx.x < klen ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0) {
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < klen) {
ac[threadIdx.x + input_offset] = (T)(qk_tmp / s_sum);
}
}
#endif
template <typename T>
void attn_prob_softmax(DType<T>* ac,
DType<T>* bd,
const DType<T>* attn_mask,
int mlen, int sqlen, int bsz, int n_head, int d_head,
cudaStream_t stream) {
float scaler = 1 / sqrt(d_head);
dim3 grid;
grid.x = bsz * n_head * sqlen;
dim3 block;
int klen = (sqlen + mlen);
if(klen <= 32) {
block.x = 32;
} else if(klen > 32 && klen <= 64) {
block.x = 64;
} else if(klen > 64 && klen <= 128) {
block.x = 128;
} else if(klen > 128 && klen <= 256) {
block.x = 256;
} else if(klen > 256 && klen <= 512) {
block.x = 512;
} else {
block.x = 1024;
}
attn_prob_softmax_kernel<<<grid, block, 0, stream>>>(ac, bd, attn_mask, sqlen, klen, scaler);
}
template void attn_prob_softmax<float>(DType<float>* ac,
DType<float>* bd,
const DType<float>* attn_mask,
int mlen, int sqlen, int bsz, int n_head, int d_head,
cudaStream_t stream);
} /* namespace tensorflow */
|
dbe5df54fa1e29de3337e0cf7b830b1682083a34.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include<sys/time.h>
//#define N 6
__global__ void add( float *a, float *b, float *c) {
int tid = blockIdx.x; //Handle the data at the index
c[tid] = a[tid] + b[tid];
}
__global__ void scale(float *a, int size, int index){
int i;
int start=(index*size+index);
int end=(index*size+size);
for(i=start+1;i<end;i++){
a[i]=(a[i]/a[start]);
}
}
__global__ void reduce(float *a, int size, int index, int b_size){
extern __shared__ float pivot[];
int i;
int tid=threadIdx.x;
int bid=blockIdx.x;
int block_size=b_size;
int pivot_start=(index*size+index);
int pivot_end=(index*size+size);
int start;
int end;
int pivot_row;
int my_row;
if(tid==0){
for(i=index;i<size;i++) pivot[i]=a[(index*size)+i];
}
__syncthreads();
pivot_row=(index*size);
my_row=(((block_size*bid) + tid)*size);
start=my_row+index;
end=my_row+size;
if(my_row >pivot_row){
for(i=start+1;i<end;i++){
// a[i]=a[i]-(a[start]*a[(index*size)+i]);
// a[i]=a[i]-(a[start]*a[(index*size)+(index+(i-start))]);
a[i]=a[i]-(a[start]*pivot[(i-my_row)]);
}
}
}
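//Worked example (illustrative, not from the original source): for N = 2 and
//a = {4, 2, 6, 9}, i.e. [[4,2],[6,9]] stored row-major:
// scale(index=0): a[1] = 2/4 = 0.5 -> {4, 0.5, 6, 9}
// reduce(index=0): row 1: a[3] = 9 - a[2]*pivot[1] = 9 - 6*0.5 = 6 -> {4, 0.5, 6, 6}
//The result packs L = [[4,0],[6,6]] (diagonal kept in L) and U = [[1,0.5],[0,1]]
//(unit diagonal); L*U = [[4,2],[6,9]] recovers the input, matching the commented-out
//verification in main(), which takes u1 = 1 when k == j.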
int main(int argc, char *argv[]){
float *a;
float *c;
float error;
int N;
int flag=0;
float **result;
float **b;
int blocks;
float *dev_a;
int i;
int j;
int k;
float l1;
float u1;
double start;
double end;
struct timeval tv;
N=atoi(argv[1]);
//allocate memory on CPU
a=(float *)malloc(sizeof(float)*N*N);
c=(float *)malloc(sizeof(float)*N*N);
result=(float **)malloc(sizeof(float *)*N);
b=(float **)malloc(sizeof(float *)*N);
for(i=0;i<N;i++){
result[i]=(float *)malloc(sizeof(float)*N);
b[i]=(float *)malloc(sizeof(float)*N);
}
//allocate the memory on the GPU
hipMalloc ( (void**)&dev_a, N*N* sizeof (float) );
srand((unsigned)2);
//fill the arrays 'a' on the CPU
for ( i = 0; i < (N*N); i++) {
a[i] =((rand()%10)+1);
}
printf("Matrix a is :\n");
/*for(i=0; i<(N*N); i++){
if(i%N==0)
printf("\n %f ", a[i]);
else printf("%lf ",a[i]);
}*/
hipMemcpy(dev_a,a,N*N*sizeof(float), hipMemcpyHostToDevice);//copy array to device memory
gettimeofday(&tv,NULL);
start=tv.tv_sec + tv.tv_usec/1000000.0;
/*Perform LU Decomposition*/
printf("\n==========================================================\n");
for(i=0;i<N;i++){
hipLaunchKernelGGL(( scale), dim3(1),dim3(1), 0, 0, dev_a,N,i);
// blocks= ((N-i-1)/512)+1;
blocks=((N/512));
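//Note: blocks = N/512 truncates, so this assumes N is a multiple of 512 (and >= 512):
//otherwise the last N % 512 rows are never reduced, and for N < 512 the grid size is 0,
//which is not a valid launch configuration (the commented-out ((N-i-1)/512)+1 above
//suggests a rounded-up block count was considered).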
// printf("Number of blocks rxd : %d \n",blocks);
hipLaunchKernelGGL(( reduce), dim3(blocks),dim3(512),N*sizeof(float), 0, dev_a,N,i,512);
}
/*LU decomposition ends here*/
gettimeofday(&tv,NULL);
end=tv.tv_sec + tv.tv_usec/1000000.0;
hipMemcpy( c, dev_a, N*N*sizeof(float),hipMemcpyDeviceToHost );//copy array back to host
printf("\nThe time for LU decomposition is %lf \n",(end-start));
//display the results
printf("\nCopied matrix C is \n");
/* for ( i = 0; i < (N*N); i++) {
if(i%N==0)
printf( "\n%f ", c[i]);
else printf("%lf ",c[i]);
}*/
printf("\n");
/*copy the result matrix into explicit 2D matrix for verification*/
for(i=0;i<N;i++){
for(j=0;j<N;j++){
result[i][j]=c[i*N+j];
}
}
/* printf("The result matrix\n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%lf ",result[i][j]);
}
printf("\n");
}*/
printf("=======================================================");
printf("\n Performing inplace verification \n");
/*Inplace verification step*/
/* for(i=0;i<N;i++){
for(j=0;j<N;j++){
b[i][j]=0;
for(k=0;k<N;k++){
if(i>=k)l1=result[i][k];
else l1=0;
if(k==j)u1=1;
else if(k<j)u1=result[k][j];//figured it out
else u1=0.0;
b[i][j]=b[i][j]+(l1*u1);
}
}
}
for(i=0;i<N;i++){
for(j=0;j<N;j++){
error=abs(a[(i*N+j)]-b[i][j]);
if(error> 1 ) {
// printf("No match occured at %d %d Error is %lf \n ", i, j, abs(a[(i*N+j)]-b[i][j]));
flag =flag+1;
}
}
}*/
if(flag==0) printf("Match");
else printf("No Matchs %d \n",flag);
printf("\n==================================================\n");
printf("\nThe b matrix\n");
/*for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%lf ",b[i][j]);
}
printf("\n");
}*/
//free the memory allocated on the GPU
hipFree( dev_a );
return 0;
}
| dbe5df54fa1e29de3337e0cf7b830b1682083a34.cu | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#include<sys/time.h>
//#define N 6
__global__ void add( float *a, float *b, float *c) {
int tid = blockIdx.x; //Handle the data at the index
c[tid] = a[tid] + b[tid];
}
__global__ void scale(float *a, int size, int index){
int i;
int start=(index*size+index);
int end=(index*size+size);
for(i=start+1;i<end;i++){
a[i]=(a[i]/a[start]);
}
}
__global__ void reduce(float *a, int size, int index, int b_size){
extern __shared__ float pivot[];
int i;
int tid=threadIdx.x;
int bid=blockIdx.x;
int block_size=b_size;
int pivot_start=(index*size+index);
int pivot_end=(index*size+size);
int start;
int end;
int pivot_row;
int my_row;
if(tid==0){
for(i=index;i<size;i++) pivot[i]=a[(index*size)+i];
}
__syncthreads();
pivot_row=(index*size);
my_row=(((block_size*bid) + tid)*size);
start=my_row+index;
end=my_row+size;
if(my_row >pivot_row){
for(i=start+1;i<end;i++){
// a[i]=a[i]-(a[start]*a[(index*size)+i]);
// a[i]=a[i]-(a[start]*a[(index*size)+(index+(i-start))]);
a[i]=a[i]-(a[start]*pivot[(i-my_row)]);
}
}
}
int main(int argc, char *argv[]){
float *a;
float *c;
float error;
int N;
int flag=0;
float **result;
float **b;
int blocks;
float *dev_a;
int i;
int j;
int k;
float l1;
float u1;
double start;
double end;
struct timeval tv;
N=atoi(argv[1]);
//allocate memory on CPU
a=(float *)malloc(sizeof(float)*N*N);
c=(float *)malloc(sizeof(float)*N*N);
result=(float **)malloc(sizeof(float *)*N);
b=(float **)malloc(sizeof(float *)*N);
for(i=0;i<N;i++){
result[i]=(float *)malloc(sizeof(float)*N);
b[i]=(float *)malloc(sizeof(float)*N);
}
//allocate the memory on the GPU
cudaMalloc ( (void**)&dev_a, N*N* sizeof (float) );
srand((unsigned)2);
//fill the arrays 'a' on the CPU
for ( i = 0; i < (N*N); i++) {
a[i] =((rand()%10)+1);
}
printf("Matrix a is :\n");
/*for(i=0; i<(N*N); i++){
if(i%N==0)
printf("\n %f ", a[i]);
else printf("%lf ",a[i]);
}*/
cudaMemcpy(dev_a,a,N*N*sizeof(float), cudaMemcpyHostToDevice);//copy array to device memory
gettimeofday(&tv,NULL);
start=tv.tv_sec + tv.tv_usec/1000000.0;
/*Perform LU Decomposition*/
printf("\n==========================================================\n");
for(i=0;i<N;i++){
scale<<<1,1>>>(dev_a,N,i);
// blocks= ((N-i-1)/512)+1;
blocks=((N/512));
// printf("Number of blocks rxd : %d \n",blocks);
reduce<<<blocks,512,N*sizeof(float)>>>(dev_a,N,i,512);
}
/*LU decomposition ends here*/
gettimeofday(&tv,NULL);
end=tv.tv_sec + tv.tv_usec/1000000.0;
cudaMemcpy( c, dev_a, N*N*sizeof(float),cudaMemcpyDeviceToHost );//copy array back to host
printf("\nThe time for LU decomposition is %lf \n",(end-start));
//display the results
printf("\nCopied matrix C is \n");
/* for ( i = 0; i < (N*N); i++) {
if(i%N==0)
printf( "\n%f ", c[i]);
else printf("%lf ",c[i]);
}*/
printf("\n");
/*copy the result matrix into explicit 2D matrix for verification*/
for(i=0;i<N;i++){
for(j=0;j<N;j++){
result[i][j]=c[i*N+j];
}
}
/* printf("The result matrix\n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%lf ",result[i][j]);
}
printf("\n");
}*/
printf("=======================================================");
printf("\n Performing inplace verification \n");
/*Inplace verification step*/
/* for(i=0;i<N;i++){
for(j=0;j<N;j++){
b[i][j]=0;
for(k=0;k<N;k++){
if(i>=k)l1=result[i][k];
else l1=0;
if(k==j)u1=1;
else if(k<j)u1=result[k][j];//figured it out
else u1=0.0;
b[i][j]=b[i][j]+(l1*u1);
}
}
}
for(i=0;i<N;i++){
for(j=0;j<N;j++){
error=abs(a[(i*N+j)]-b[i][j]);
if(error> 1 ) {
// printf("No match occured at %d %d Error is %lf \n ", i, j, abs(a[(i*N+j)]-b[i][j]));
flag =flag+1;
}
}
}*/
if(flag==0) printf("Match");
else printf("No Matchs %d \n",flag);
printf("\n==================================================\n");
printf("\nThe b matrix\n");
/*for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%lf ",b[i][j]);
}
printf("\n");
}*/
//free the memory allocated on the GPU
cudaFree( dev_a );
return 0;
}
|
59f9b8d87ce7f3523570a90654ce586efd8bc559.hip | // !!! This is a file automatically generated by hipify!!!
#include "mex.h"
#include "roctracer/roctx.h"
#include "cudaCommon.h"
#include "cudaFluidStep.h"
#include "cudaArrayRotateB.h"
#include "cudaHaloExchange.h"
#include "cudaStatics.h"
#include "flux.h"
//int performFluidUpdate_3D(MGArray *fluid, ParallelTopology* parallelTopo, int order, int stepNumber, double *lambda, double gamma, double minRho, double stepMethod, int geomType, double Rinner)
int performFluidUpdate_3D(MGArray *fluid, ParallelTopology* parallelTopo, FluidStepParams fsp, int stepNumber, int order, MGArray *tempStorage)
{
int sweep, flag_1D = 0;
// Choose our sweep number depending on whether we are 1- or 2-dimensional
//if(fluid[0].dim[2] > 1) { // if nz > 1, three-dimensional
sweep = (stepNumber + 3*(order > 0)) % 6;
//} else {
// if(fluid[0].dim[1] > 3) { // if ny > 3, two dimensional
// sweep = (stepNumber + (order < 0)) % 2;
// } else {
// flag_1D = 1;
// }
//}
int preperm[6] = {0, 2, 0, 2, 3, 3};
int fluxcall[3][6] = {{1,2,1,2,3,3},{3,1,2,3,1,2},{2,3,3,1,2,1}};
int permcall[3][6] = {{3,2,2,3,3,2},{2,3,3,5,2,6},{6,3,5,0,2,0}};
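/* Note: `sweep` (0..5) is picked from the step number and the sign of `order`, and
these tables enumerate the six orderings of the directional sub-steps: sub-step n
fluxes along fluxcall[n][sweep] (the value handed to stepDirection, the boundary set,
and the halo exchange), while preperm[sweep] and permcall[n][sweep] are the
flipArrayIndices permutation codes applied before the first and after each sub-step
(0 meaning no permutation). */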
int n;
int returnCode = SUCCESSFUL;
int nowDir;
FluidStepParams stepParameters = fsp;
// Just short-circuit for a one-D run, don't try to make the 2/3D loop reduce for it
if(flag_1D) {
nowDir = 1;
stepParameters.stepDirection = nowDir;
returnCode = performFluidUpdate_1D(fluid, stepParameters, parallelTopo, NULL);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = setFluidBoundary(fluid, fluid->matlabClassHandle, &fsp.geometry, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = exchange_MPI_Halos(fluid, 5, parallelTopo, nowDir);
return CHECK_IMOGEN_ERROR(returnCode);
}
#ifdef USE_NVTX
roctxRangePush(__FUNCTION__);
#endif
// Put pointers for GPU storage here so we can acquire it once for this whole step, reusing for all 3 fluid calls
// and the array rotates.
MGArray localTempStorage;
localTempStorage.nGPUs = -1; // Use this as a "not allocated" marker.
int usingLocalTemp = 0;
if(tempStorage == NULL) {
usingLocalTemp = 1;
tempStorage = &localTempStorage;
}
if(order > 0) { /* If we are doing forward sweep */
hipStream_t *streams = NULL;
int nstreams;
int s = getGPUTypeStreams(fluid->matlabClassHandle, &streams, &nstreams);
// If we already have a buffer that is large enough, use it. Otherwise we actually /lose/, because
// performFluidUpdate below will have to actually free and alloc
returnCode = (preperm[sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, preperm[sweep], streams,
usingLocalTemp ? NULL : tempStorage) : SUCCESSFUL);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
for(n = 0; n < 3; n++) {
nowDir = fluxcall[n][sweep];
if(fluid->dim[0] > 3) {
stepParameters.stepDirection = nowDir;
returnCode = performFluidUpdate_1D(fluid, stepParameters, parallelTopo, tempStorage);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = setFluidBoundary(fluid, fluid->matlabClassHandle, &fsp.geometry, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = exchange_MPI_Halos(fluid, 5, parallelTopo, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
/* FIXME: INSERT MAGNETIC FLUX ROUTINES HERE */
returnCode = (permcall[n][sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, permcall[n][sweep], streams, tempStorage) : SUCCESSFUL );
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
} else { /* If we are doing backwards sweep */
hipStream_t *streams = NULL;
int nstreams;
int s = getGPUTypeStreams(fluid->matlabClassHandle, &streams, &nstreams);
returnCode = (preperm[sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, preperm[sweep], streams,
usingLocalTemp ? NULL : tempStorage) : SUCCESSFUL);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
for(n = 0; n < 3; n++) {
nowDir = fluxcall[n][sweep];
/* FIXME: INSERT MAGNETIC FLUX ROUTINES HERE */
if(fluid->dim[0] > 3) {
stepParameters.stepDirection = nowDir;
returnCode = performFluidUpdate_1D(fluid, stepParameters, parallelTopo, tempStorage);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = setFluidBoundary(fluid, fluid->matlabClassHandle, &fsp.geometry, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = exchange_MPI_Halos(fluid, 5, parallelTopo, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
returnCode = (permcall[n][sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, permcall[n][sweep], streams, tempStorage) : SUCCESSFUL );
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
}
if(usingLocalTemp) {
#ifdef USE_NVTX
roctxMark("flux.cu:131 large free");
#endif
returnCode = MGA_delete(&localTempStorage);
localTempStorage.nGPUs = -1;
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
#ifdef USE_NVTX
roctxRangePop();
#endif
return CHECK_IMOGEN_ERROR(returnCode);
/* Fluid half-step completed
* If order > 0, next call sourcing terms
* If order < 0, next call fluid with order > 0 */
}
| 59f9b8d87ce7f3523570a90654ce586efd8bc559.cu |
#include "mex.h"
#include "nvToolsExt.h"
#include "cudaCommon.h"
#include "cudaFluidStep.h"
#include "cudaArrayRotateB.h"
#include "cudaHaloExchange.h"
#include "cudaStatics.h"
#include "flux.h"
//int performFluidUpdate_3D(MGArray *fluid, ParallelTopology* parallelTopo, int order, int stepNumber, double *lambda, double gamma, double minRho, double stepMethod, int geomType, double Rinner)
int performFluidUpdate_3D(MGArray *fluid, ParallelTopology* parallelTopo, FluidStepParams fsp, int stepNumber, int order, MGArray *tempStorage)
{
int sweep, flag_1D = 0;
// Choose our sweep number depending on whether we are 1- or 2-dimensional
//if(fluid[0].dim[2] > 1) { // if nz > 1, three-dimensional
sweep = (stepNumber + 3*(order > 0)) % 6;
//} else {
// if(fluid[0].dim[1] > 3) { // if ny > 3, two dimensional
// sweep = (stepNumber + (order < 0)) % 2;
// } else {
// flag_1D = 1;
// }
//}
int preperm[6] = {0, 2, 0, 2, 3, 3};
int fluxcall[3][6] = {{1,2,1,2,3,3},{3,1,2,3,1,2},{2,3,3,1,2,1}};
int permcall[3][6] = {{3,2,2,3,3,2},{2,3,3,5,2,6},{6,3,5,0,2,0}};
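// Note on the three tables above (inferred from how they are used below, not from outside documentation):
// 'sweep' selects a column; preperm[sweep] is the array-index permutation applied once before the loop,
// fluxcall[n][sweep] is the flux direction taken on sub-step n (1/2/3, presumably X/Y/Z), and
// permcall[n][sweep] is the permutation applied after that sub-step (0 means no flip).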
int n;
int returnCode = SUCCESSFUL;
int nowDir;
FluidStepParams stepParameters = fsp;
// Just short-circuit for a one-D run, don't try to make the 2/3D loop reduce for it
if(flag_1D) {
nowDir = 1;
stepParameters.stepDirection = nowDir;
returnCode = performFluidUpdate_1D(fluid, stepParameters, parallelTopo, NULL);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = setFluidBoundary(fluid, fluid->matlabClassHandle, &fsp.geometry, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = exchange_MPI_Halos(fluid, 5, parallelTopo, nowDir);
return CHECK_IMOGEN_ERROR(returnCode);
}
#ifdef USE_NVTX
nvtxRangePush(__FUNCTION__);
#endif
// Put pointers for GPU storage here so we can acquire it once for this whole step, reusing for all 3 fluid calls
// and the array rotates.
MGArray localTempStorage;
localTempStorage.nGPUs = -1; // Use this as a "not allocated" marker.
int usingLocalTemp = 0;
if(tempStorage == NULL) {
usingLocalTemp = 1;
tempStorage = &localTempStorage;
}
if(order > 0) { /* If we are doing forward sweep */
cudaStream_t *streams = NULL;
int nstreams;
int s = getGPUTypeStreams(fluid->matlabClassHandle, &streams, &nstreams);
// If we already have a buffer that is large enough, use it. Otherwise we actually /lose/, because
// performFluidUpdate below will have to actually free and alloc
returnCode = (preperm[sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, preperm[sweep], streams,
usingLocalTemp ? NULL : tempStorage) : SUCCESSFUL);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
for(n = 0; n < 3; n++) {
nowDir = fluxcall[n][sweep];
if(fluid->dim[0] > 3) {
stepParameters.stepDirection = nowDir;
returnCode = performFluidUpdate_1D(fluid, stepParameters, parallelTopo, tempStorage);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = setFluidBoundary(fluid, fluid->matlabClassHandle, &fsp.geometry, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = exchange_MPI_Halos(fluid, 5, parallelTopo, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
/* FIXME: INSERT MAGNETIC FLUX ROUTINES HERE */
returnCode = (permcall[n][sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, permcall[n][sweep], streams, tempStorage) : SUCCESSFUL );
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
} else { /* If we are doing backwards sweep */
cudaStream_t *streams = NULL;
int nstreams;
int s = getGPUTypeStreams(fluid->matlabClassHandle, &streams, &nstreams);
returnCode = (preperm[sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, preperm[sweep], streams,
usingLocalTemp ? NULL : tempStorage) : SUCCESSFUL);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
for(n = 0; n < 3; n++) {
nowDir = fluxcall[n][sweep];
/* FIXME: INSERT MAGNETIC FLUX ROUTINES HERE */
if(fluid->dim[0] > 3) {
stepParameters.stepDirection = nowDir;
returnCode = performFluidUpdate_1D(fluid, stepParameters, parallelTopo, tempStorage);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = setFluidBoundary(fluid, fluid->matlabClassHandle, &fsp.geometry, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
returnCode = exchange_MPI_Halos(fluid, 5, parallelTopo, nowDir);
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
returnCode = (permcall[n][sweep] != 0 ? flipArrayIndices(fluid, NULL, 5, permcall[n][sweep], streams, tempStorage) : SUCCESSFUL );
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
}
if(usingLocalTemp) {
#ifdef USE_NVTX
nvtxMark("flux.cu:131 large free");
#endif
returnCode = MGA_delete(&localTempStorage);
localTempStorage.nGPUs = -1;
if(returnCode != SUCCESSFUL) return CHECK_IMOGEN_ERROR(returnCode);
}
#ifdef USE_NVTX
nvtxRangePop();
#endif
return CHECK_IMOGEN_ERROR(returnCode);
/* Fluid half-step completed
* If order > 0, next call sourcing terms
* If order < 0, next call fluid with order > 0 */
}
|
05e5140098fbd9bf59fd64b8b59937eb505ab6a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Cuckaroo Cycle, a memory-hard proof-of-work by John Tromp
// Copyright (c) 2018-2019 Jiri Vadura (photon) and John Tromp
// This software is covered by the FAIR MINING license
#include "mean.cuh"
int gpuAssert(hipError_t code, char* file, int line, bool abort) {
int device_id;
hipGetDevice(&device_id);
if (code != hipSuccess) {
hipDeviceReset();
if (abort) {
spdlog::error("Device {} GPUassert({}): {} {} {}", device_id, code, hipGetErrorString(code), file, line);
exit(code);
}
}
return code;
}
#ifndef MAXSOLS
#define MAXSOLS 4
#endif
#ifndef XBITS
#define XBITS 6
#endif
const uint32_t NX = 1 << XBITS;
const uint32_t NX2 = NX * NX;
const uint32_t XMASK = NX - 1;
const uint32_t YBITS = XBITS;
const uint32_t NY = 1 << YBITS;
const uint32_t YZBITS = EDGEBITS - XBITS;
const uint32_t ZBITS = YZBITS - YBITS;
const uint32_t NZ = 1 << ZBITS;
const uint32_t ZMASK = NZ - 1;
#ifndef IDXSHIFT
// number of bits of compression of surviving edge endpoints
// reduces space used in cycle finding, but too high a value
// results in NODE OVERFLOW warnings and fake cycles
#define IDXSHIFT 12
#endif
const uint32_t MAXEDGES = NEDGES >> IDXSHIFT;
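// Illustration of the shift above (EDGEBITS/NEDGES come from the included headers; assuming
// EDGEBITS = 29, i.e. cuckaroo29): NEDGES = 1 << 29, so MAXEDGES = NEDGES >> 12 = 131072,
// the cap on edges handed to the CPU cycle finder in SolverCtx::solve().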
#ifndef NEPS_A
#define NEPS_A 133
#endif
#ifndef NEPS_B
#define NEPS_B 88
#endif
#define NEPS 128
// Number of Parts of BufferB, all but one of which will overlap BufferA
#ifndef NB
#define NB 2
#endif
#ifndef NA
#define NA ((NB * NEPS_A + NEPS_B - 1) / NEPS_B)
#endif
const uint32_t EDGES_A = NZ * NEPS_A / NEPS;
const uint32_t EDGES_B = NZ * NEPS_B / NEPS;
const uint32_t ROW_EDGES_A = EDGES_A * NY;
const uint32_t ROW_EDGES_B = EDGES_B * NY;
__constant__ uint2 recoveredges[MAXCYCLELEN];
__constant__ uint2 e0 = {0, 0};
__device__ uint64_t dipblock(const siphash_keys& keys, const word_t edge, uint64_t* buf) {
diphash_state<> shs(keys);
word_t edge0 = edge & ~EDGE_BLOCK_MASK;
uint32_t i;
for (i = 0; i < EDGE_BLOCK_MASK; i++) {
shs.hash24(edge0 + i);
buf[i] = shs.xor_lanes();
}
shs.hash24(edge0 + i);
buf[i] = 0;
return shs.xor_lanes();
}
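// Note on dipblock() (matches how SeedA/Recovery use it below): buf[i] holds the rolling lane-XOR
// after absorbing edge edge0+i and the return value is the final one, so callers recover edge e's
// hash as buf[e] ^ last; the last slot is zeroed so that entry yields 'last' itself.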
__device__ uint32_t endpoint(uint2 nodes, int uorv) {
return uorv ? nodes.y : nodes.x;
}
#ifndef FLUSHA // should perhaps be in trimparams and passed as template
// parameter
#define FLUSHA 16
#endif
template <int maxOut>
__global__ void SeedA(const siphash_keys& sipkeys, ulonglong4* __restrict__ buffer, uint32_t* __restrict__ indexes) {
const int group = blockIdx.x;
const int dim = blockDim.x;
const int lid = threadIdx.x;
const int gid = group * dim + lid;
const int nthreads = gridDim.x * dim;
const int FLUSHA2 = 2 * FLUSHA;
__shared__ uint2 tmp[NX][FLUSHA2]; // needs to be ulonglong4 aligned
const int TMPPERLL4 = sizeof(ulonglong4) / sizeof(uint2);
__shared__ int counters[NX];
uint64_t buf[EDGE_BLOCK_SIZE];
for (int row = lid; row < NX; row += dim)
counters[row] = 0;
__syncthreads();
const int col = group % NX;
const int loops = NEDGES / nthreads; // assuming THREADS_HAVE_EDGES checked
for (int blk = 0; blk < loops; blk += EDGE_BLOCK_SIZE) {
uint32_t nonce0 = gid * loops + blk;
const uint64_t last = dipblock(sipkeys, nonce0, buf);
for (uint32_t e = 0; e < EDGE_BLOCK_SIZE; e++) {
uint64_t edge = buf[e] ^ last;
uint32_t node0 = edge & EDGEMASK;
uint32_t node1 = (edge >> 32) & EDGEMASK;
int row = node0 >> YZBITS;
int counter = min((int) atomicAdd(counters + row, 1),
(int) (FLUSHA2 - 1)); // assuming ROWS_LIMIT_LOSSES checked
tmp[row][counter] = make_uint2(node0, node1);
__syncthreads();
if (counter == FLUSHA - 1) {
int localIdx = min(FLUSHA2, counters[row]);
int newCount = localIdx % FLUSHA;
int nflush = localIdx - newCount;
uint32_t grp = row * NX + col;
int cnt = min((int) atomicAdd(indexes + grp, nflush), (int) (maxOut - nflush));
for (int i = 0; i < nflush; i += TMPPERLL4)
buffer[((uint64_t) grp * maxOut + cnt + i) / TMPPERLL4] = *(ulonglong4*) (&tmp[row][i]);
for (int t = 0; t < newCount; t++) {
tmp[row][t] = tmp[row][t + nflush];
}
counters[row] = newCount;
}
__syncthreads();
}
}
uint2 zero = make_uint2(0, 0);
for (int row = lid; row < NX; row += dim) {
int localIdx = min(FLUSHA2, counters[row]);
uint32_t grp = row * NX + col;
for (int j = localIdx; j % TMPPERLL4; j++)
tmp[row][j] = zero;
for (int i = 0; i < localIdx; i += TMPPERLL4) {
int cnt = min((int) atomicAdd(indexes + grp, TMPPERLL4), (int) (maxOut - TMPPERLL4));
buffer[((uint64_t) grp * maxOut + cnt) / TMPPERLL4] = *(ulonglong4*) (&tmp[row][i]);
}
}
}
template <typename Edge>
__device__ bool null(Edge e);
__device__ bool null(uint32_t nonce) {
return nonce == 0;
}
__device__ bool null(uint2 nodes) {
return nodes.x == 0 && nodes.y == 0;
}
#ifndef FLUSHB
#define FLUSHB 8
#endif
template <typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
return __ldg(ptr);
#else
return *ptr;
#endif
}
template <int maxOut>
__global__ void SeedB(const uint2* __restrict__ source,
ulonglong4* __restrict__ destination,
const uint32_t* __restrict__ srcIdx,
uint32_t* __restrict__ dstIdx) {
const int group = blockIdx.x;
const int dim = blockDim.x;
const int lid = threadIdx.x;
const int FLUSHB2 = 2 * FLUSHB;
__shared__ uint2 tmp[NX][FLUSHB2];
const int TMPPERLL4 = sizeof(ulonglong4) / sizeof(uint2);
__shared__ int counters[NX];
for (int col = lid; col < NX; col += dim)
counters[col] = 0;
__syncthreads();
const int row = group / NX;
const int bucketEdges = min((int) srcIdx[group], (int) maxOut);
const int loops = (bucketEdges + dim - 1) / dim;
for (int loop = 0; loop < loops; loop++) {
int col;
int counter = 0;
const int edgeIndex = loop * dim + lid;
if (edgeIndex < bucketEdges) {
const int index = group * maxOut + edgeIndex;
uint2 edge = ldg(&source[index]);
if (!null(edge)) {
uint32_t node1 = edge.x;
col = (node1 >> ZBITS) & XMASK;
counter = min((int) atomicAdd(counters + col, 1),
(int) (FLUSHB2 - 1)); // assuming COLS_LIMIT_LOSSES checked
tmp[col][counter] = edge;
}
}
__syncthreads();
if (counter == FLUSHB - 1) {
int localIdx = min(FLUSHB2, counters[col]);
int newCount = localIdx % FLUSHB;
int nflush = localIdx - newCount;
uint32_t grp = row * NX + col;
int cnt = min((int) atomicAdd(dstIdx + grp, nflush), (int) (maxOut - nflush));
for (int i = 0; i < nflush; i += TMPPERLL4)
destination[((uint64_t) grp * maxOut + cnt + i) / TMPPERLL4] = *(ulonglong4*) (&tmp[col][i]);
for (int t = 0; t < newCount; t++) {
tmp[col][t] = tmp[col][t + nflush];
}
counters[col] = newCount;
}
__syncthreads();
}
uint2 zero = make_uint2(0, 0);
for (int col = lid; col < NX; col += dim) {
int localIdx = min(FLUSHB2, counters[col]);
uint32_t grp = row * NX + col;
for (int j = localIdx; j % TMPPERLL4; j++)
tmp[col][j] = zero;
for (int i = 0; i < localIdx; i += TMPPERLL4) {
int cnt = min((int) atomicAdd(dstIdx + grp, TMPPERLL4), (int) (maxOut - TMPPERLL4));
destination[((uint64_t) grp * maxOut + cnt) / TMPPERLL4] = *(ulonglong4*) (&tmp[col][i]);
}
}
}
__device__ __forceinline__ void Increase2bCounter(uint32_t* ecounters, const int bucket) {
int word = bucket >> 5;
unsigned char bit = bucket & 0x1F;
uint32_t mask = 1 << bit;
uint32_t old = atomicOr(ecounters + word, mask) & mask;
if (old)
atomicOr(ecounters + word + NZ / 32, mask);
}
__device__ __forceinline__ bool Read2bCounter(uint32_t* ecounters, const int bucket) {
int word = bucket >> 5;
unsigned char bit = bucket & 0x1F;
return (ecounters[word + NZ / 32] >> bit) & 1;
}
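// How the two helpers above pack 2-bit saturating counters (the layout Round relies on):
// ecounters[0 .. NZ/32-1] hold the "seen at least once" bits and ecounters[NZ/32 .. NZ/16-1]
// the "seen at least twice" bits. For example, bucket = 37 gives word = 1, bit = 5: the first
// Increase2bCounter(ecounters, 37) sets bit 5 of ecounters[1]; a second call for the same bucket
// also sets bit 5 of ecounters[1 + NZ/32], which is the bit Read2bCounter tests.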
template <int NP, int maxIn, int maxOut>
__global__ void Round(const int round,
const uint2* __restrict__ src,
uint2* __restrict__ dst,
const uint32_t* __restrict__ srcIdx,
uint32_t* __restrict__ dstIdx) {
const int group = blockIdx.x;
const int dim = blockDim.x;
const int lid = threadIdx.x;
const int COUNTERWORDS = NZ / 16; // 16 2-bit counters per 32-bit word
__shared__ uint32_t ecounters[COUNTERWORDS];
for (int i = lid; i < COUNTERWORDS; i += dim)
ecounters[i] = 0;
__syncthreads();
for (int i = 0; i < NP; i++, src += NX2 * maxIn, srcIdx += NX2) {
const int edgesInBucket = min(srcIdx[group], maxIn);
const int loops = (edgesInBucket + dim - 1) / dim;
for (int loop = 0; loop < loops; loop++) {
const int lindex = loop * dim + lid;
if (lindex < edgesInBucket) {
const int index = maxIn * group + lindex;
uint2 edge = ldg(&src[index]);
if (null(edge))
continue;
uint32_t node = endpoint(edge, round & 1);
Increase2bCounter(ecounters, node & ZMASK);
}
}
}
__syncthreads();
src -= NP * NX2 * maxIn;
srcIdx -= NP * NX2;
for (int i = 0; i < NP; i++, src += NX2 * maxIn, srcIdx += NX2) {
const int edgesInBucket = min(srcIdx[group], maxIn);
const int loops = (edgesInBucket + dim - 1) / dim;
for (int loop = 0; loop < loops; loop++) {
const int lindex = loop * dim + lid;
if (lindex < edgesInBucket) {
const int index = maxIn * group + lindex;
uint2 edge = ldg(&src[index]);
if (null(edge))
continue;
uint32_t node0 = endpoint(edge, round & 1);
if (Read2bCounter(ecounters, node0 & ZMASK)) {
uint32_t node1 = endpoint(edge, (round & 1) ^ 1);
const int bucket = node1 >> ZBITS;
const int bktIdx = min(atomicAdd(dstIdx + bucket, 1), maxOut - 1);
dst[bucket * maxOut + bktIdx] = (round & 1) ? make_uint2(node1, node0) : make_uint2(node0, node1);
}
}
}
}
}
template <int maxIn>
__global__ void Tail(const uint2* source, uint2* destination, const uint32_t* srcIdx, uint32_t* dstIdx) {
const int lid = threadIdx.x;
const int group = blockIdx.x;
const int dim = blockDim.x;
int myEdges = srcIdx[group];
__shared__ int destIdx;
if (lid == 0)
destIdx = atomicAdd(dstIdx, myEdges);
__syncthreads();
for (int i = lid; i < myEdges; i += dim)
destination[destIdx + lid] = source[group * maxIn + lid];
}
__global__ void Recovery(const siphash_keys& sipkeys, ulonglong4* buffer, int* indexes, int proofsize) {
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
const int lid = threadIdx.x;
const int nthreads = blockDim.x * gridDim.x;
const int loops = NEDGES / nthreads;
__shared__ uint32_t nonces[MAXCYCLELEN];
uint64_t buf[EDGE_BLOCK_SIZE];
if (lid < proofsize) {
nonces[lid] = 0;
}
__syncthreads();
for (int blk = 0; blk < loops; blk += EDGE_BLOCK_SIZE) {
uint32_t nonce0 = gid * loops + blk;
const uint64_t last = dipblock(sipkeys, nonce0, buf);
for (int i = 0; i < EDGE_BLOCK_SIZE; i++) {
uint64_t edge = buf[i] ^ last;
uint32_t u = edge & EDGEMASK;
uint32_t v = (edge >> 32) & EDGEMASK;
for (int p = 0; p < proofsize; p++) { // YO
if (recoveredges[p].x == u && recoveredges[p].y == v) {
nonces[p] = nonce0 + i;
}
}
}
}
__syncthreads();
if (lid < proofsize) {
if (nonces[lid] > 0) {
indexes[lid] = nonces[lid];
}
}
}
trimparams::trimparams() {
ntrims = 176;
genA.blocks = 4096;
genA.tpb = 256;
genB.blocks = NX2;
genB.tpb = 128;
trim.blocks = NX2;
trim.tpb = 512;
tail.blocks = NX2;
tail.tpb = 1024;
recover.blocks = 1024;
recover.tpb = 1024;
}
GEdgeTrimmer::GEdgeTrimmer(const trimparams _tp, int cyclelen)
: cycle_len(cyclelen), tp(_tp), indexesSize(NX * NY * sizeof(uint32_t)) /*, indexesE(new uint32_t*[1 + NB])*/ {
checkCudaErrors_V(hipMalloc((void**) &dt, sizeof(GEdgeTrimmer)));
checkCudaErrors_V(hipMalloc((void**) &uvnodes, cycle_len * 2 * sizeof(uint32_t)));
checkCudaErrors_V(hipMalloc((void**) &dipkeys, sizeof(siphash_keys)));
for (int i = 0; i < 1 + NB; i++) {
checkCudaErrors_V(hipMalloc((void**) &indexesE[i], indexesSize));
}
sizeA = ROW_EDGES_A * NX * sizeof(uint2);
sizeB = ROW_EDGES_B * NX * sizeof(uint2);
const size_t bufferSize = sizeA + sizeB / NB;
assert(bufferSize >= sizeB + sizeB / NB / 2); // ensure enough space for Round 1
checkCudaErrors_V(hipMalloc((void**) &bufferA, bufferSize));
bufferAB = bufferA + sizeB / NB;
bufferB = bufferA + bufferSize - sizeB;
assert(bufferA + sizeA == bufferB + sizeB * (NB - 1) / NB); // ensure alignment of overlap
hipMemcpy(dt, this, sizeof(GEdgeTrimmer), hipMemcpyHostToDevice);
initsuccess = true;
}
uint64_t GEdgeTrimmer::globalbytes() const {
return (sizeA + sizeB / NB) + (1 + NB) * indexesSize + sizeof(siphash_keys) + cycle_len * 2 * sizeof(uint32_t) +
sizeof(GEdgeTrimmer);
}
GEdgeTrimmer::~GEdgeTrimmer() {
checkCudaErrors_V(hipFree(bufferA));
for (int i = 0; i < 1 + NB; i++) {
checkCudaErrors_V(hipFree(indexesE[i]));
}
/*delete[] indexesE;*/
checkCudaErrors_V(hipFree(dipkeys));
checkCudaErrors_V(hipFree(uvnodes));
checkCudaErrors_V(hipFree(dt));
hipDeviceReset();
}
uint32_t GEdgeTrimmer::trim() {
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
hipMemcpy(dipkeys, &sipkeys, sizeof(sipkeys), hipMemcpyHostToDevice);
hipDeviceSynchronize();
float durationA, durationB;
hipEventRecord(start, NULL);
hipMemset(indexesE[1], 0, indexesSize);
hipLaunchKernelGGL(( SeedA<EDGES_A>), dim3(tp.genA.blocks), dim3(tp.genA.tpb), 0, 0, *dipkeys, (ulonglong4*) bufferAB, indexesE[1]);
checkCudaErrors(hipDeviceSynchronize());
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&durationA, start, stop);
if (abort.load()) {
return false;
}
hipEventRecord(start, NULL);
hipMemset(indexesE[0], 0, indexesSize);
uint32_t qA = sizeA / NA;
uint32_t qE = NX2 / NA;
for (uint32_t i = 0; i < NA; i++) {
hipLaunchKernelGGL(( SeedB<EDGES_A>), dim3(tp.genB.blocks / NA), dim3(tp.genB.tpb), 0, 0,
(uint2*) (bufferAB + i * qA), (ulonglong4*) (bufferA + i * qA), indexesE[1] + i * qE, indexesE[0] + i * qE);
if (abort.load()) {
return false;
}
}
checkCudaErrors(hipDeviceSynchronize());
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&durationB, start, stop);
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
spdlog::trace("Seeding completed in {} + {} ms", durationA, durationB);
if (abort.load()) {
return false;
}
for (uint32_t i = 0; i < NB; i++)
hipMemset(indexesE[1 + i], 0, indexesSize);
qA = sizeA / NB;
const size_t qB = sizeB / NB;
qE = NX2 / NB;
for (uint32_t i = NB; i--;) {
hipLaunchKernelGGL(( Round<1, EDGES_A, EDGES_B / NB>)
, dim3(tp.trim.blocks / NB), dim3(tp.trim.tpb), 0, 0, 0, (uint2*) (bufferA + i * qA), (uint2*) (bufferB + i * qB),
indexesE[0] + i * qE, indexesE[1 + i]); // to .632
if (abort.load()) {
return false;
}
}
hipMemset(indexesE[0], 0, indexesSize);
hipLaunchKernelGGL(( Round<NB, EDGES_B / NB, EDGES_B / 2>), dim3(tp.trim.blocks), dim3(tp.trim.tpb), 0, 0, 1, (const uint2*) bufferB, (uint2*) bufferA,
indexesE[1],
indexesE[0]); // to .296
if (abort.load()) {
return false;
}
hipMemset(indexesE[1], 0, indexesSize);
hipLaunchKernelGGL(( Round<1, EDGES_B / 2, EDGES_A / 4>), dim3(tp.trim.blocks), dim3(tp.trim.tpb), 0, 0, 2, (const uint2*) bufferA, (uint2*) bufferB,
indexesE[0],
indexesE[1]); // to .176
if (abort.load()) {
return false;
}
hipMemset(indexesE[0], 0, indexesSize);
hipLaunchKernelGGL(( Round<1, EDGES_A / 4, EDGES_B / 4>), dim3(tp.trim.blocks), dim3(tp.trim.tpb), 0, 0, 3, (const uint2*) bufferB, (uint2*) bufferA,
indexesE[1],
indexesE[0]); // to .117
if (abort.load()) {
return false;
}
hipDeviceSynchronize();
for (int round = 4; round < tp.ntrims; round += 2) {
hipMemset(indexesE[1], 0, indexesSize);
hipLaunchKernelGGL(( Round<1, EDGES_B / 4, EDGES_B / 4>), dim3(tp.trim.blocks), dim3(tp.trim.tpb), 0, 0, round, (const uint2*) bufferA,
(uint2*) bufferB, indexesE[0], indexesE[1]);
if (abort.load()) {
return false;
}
hipMemset(indexesE[0], 0, indexesSize);
hipLaunchKernelGGL(( Round<1, EDGES_B / 4, EDGES_B / 4>), dim3(tp.trim.blocks), dim3(tp.trim.tpb), 0, 0, round + 1, (const uint2*) bufferB,
(uint2*) bufferA, indexesE[1], indexesE[0]);
if (abort.load()) {
return false;
}
}
hipMemset(indexesE[1], 0, indexesSize);
hipDeviceSynchronize();
hipLaunchKernelGGL(( Tail<EDGES_B / 4>)
, dim3(tp.tail.blocks), dim3(tp.tail.tpb), 0, 0, (const uint2*) bufferA, (uint2*) bufferB, indexesE[0], indexesE[1]);
hipMemcpy(&nedges, indexesE[1], sizeof(uint32_t), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
return nedges;
}
SolverCtx::SolverCtx(const trimparams& tp, int cyclelen)
: trimmer(tp, 0), cg(MAXEDGES, MAXEDGES, MAXSOLS, IDXSHIFT, cyclelen) {
cycle_len = cyclelen;
edges = new uint2[MAXEDGES];
soledges = new uint2[cycle_len];
}
int SolverCtx::findcycles(uint2* edges, uint32_t nedges) {
cg.reset();
for (uint32_t i = 0; i < nedges; i++) {
cg.add_compress_edge(edges[i].x, edges[i].y);
}
for (uint32_t s = 0; s < cg.nsols; s++) {
for (uint32_t j = 0; j < cycle_len; j++) {
soledges[j] = edges[cg.sols[s][j]];
}
sols.resize(sols.size() + cycle_len);
hipMemcpyToSymbol(recoveredges, soledges, sizeof(uint2) * cycle_len);
hipMemset(trimmer.indexesE[1], 0, trimmer.indexesSize);
hipLaunchKernelGGL(( Recovery), dim3(trimmer.tp.recover.blocks), dim3(trimmer.tp.recover.tpb), 0, 0, *trimmer.dipkeys, (ulonglong4*) trimmer.bufferA,
(int*) trimmer.indexesE[1], cycle_len);
hipMemcpy(&sols[sols.size() - cycle_len], trimmer.indexesE[1], cycle_len * sizeof(uint32_t),
hipMemcpyDeviceToHost);
checkCudaErrors(hipDeviceSynchronize());
qsort(&sols[sols.size() - cycle_len], cycle_len, sizeof(uint32_t), cg.nonce_cmp);
}
return 0;
}
int SolverCtx::solve() {
trimmer.abort = false;
uint32_t nedges = trimmer.trim();
if (!nedges)
return 0;
if (nedges > MAXEDGES) {
spdlog::trace("OOPS; losing {} edges beyond MAXEDGES={}", nedges - MAXEDGES, MAXEDGES);
nedges = MAXEDGES;
}
hipMemcpy(edges, trimmer.bufferB, sizeof(uint2[nedges]), hipMemcpyDeviceToHost);
findcycles(edges, nedges);
spdlog::trace("findcycles edges {}", nedges);
return sols.size() / cycle_len;
}
void FillDefaultGPUParams(SolverParams& params) {
trimparams tp;
params.device = 0;
params.ntrims = tp.ntrims;
params.genablocks = min(tp.genA.blocks, NEDGES / EDGE_BLOCK_SIZE / tp.genA.tpb);
params.genatpb = tp.genA.tpb;
params.genbtpb = tp.genB.tpb;
params.trimtpb = tp.trim.tpb;
params.tailtpb = tp.tail.tpb;
params.recoverblocks = min(tp.recover.blocks, NEDGES / EDGE_BLOCK_SIZE / tp.recover.tpb);
params.recovertpb = tp.recover.tpb;
params.cpuload = false;
}
SolverCtx* CreateSolverCtx(SolverParams& params, int cyclelen) {
trimparams tp;
tp.ntrims = params.ntrims;
tp.genA.blocks = params.genablocks;
tp.genA.tpb = params.genatpb;
tp.genB.tpb = params.genbtpb;
tp.trim.tpb = params.trimtpb;
tp.tail.tpb = params.tailtpb;
tp.recover.blocks = params.recoverblocks;
tp.recover.tpb = params.recovertpb;
hipDeviceProp_t prop;
checkCudaErrors_N(hipGetDeviceProperties(&prop, params.device));
assert(tp.genA.tpb <= prop.maxThreadsPerBlock);
assert(tp.genB.tpb <= prop.maxThreadsPerBlock);
assert(tp.trim.tpb <= prop.maxThreadsPerBlock);
// assert(tp.tailblocks <= prop.threadDims[0]);
assert(tp.tail.tpb <= prop.maxThreadsPerBlock);
assert(tp.recover.tpb <= prop.maxThreadsPerBlock);
assert(tp.genA.blocks * tp.genA.tpb * EDGE_BLOCK_SIZE <= NEDGES); // check THREADS_HAVE_EDGES
assert(tp.recover.blocks * tp.recover.tpb * EDGE_BLOCK_SIZE <= NEDGES); // check THREADS_HAVE_EDGES
assert(tp.genA.tpb / NX <= FLUSHA); // check ROWS_LIMIT_LOSSES
assert(tp.genB.tpb / NX <= FLUSHB); // check COLS_LIMIT_LOSSES
checkCudaErrors_N(hipSetDevice(params.device));
if (!params.cpuload) {
checkCudaErrors_N(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
}
return new SolverCtx(tp, cyclelen);
}
| 05e5140098fbd9bf59fd64b8b59937eb505ab6a4.cu | // Cuckaroo Cycle, a memory-hard proof-of-work by John Tromp
// Copyright (c) 2018-2019 Jiri Vadura (photon) and John Tromp
// This software is covered by the FAIR MINING license
#include "mean.cuh"
int gpuAssert(cudaError_t code, char* file, int line, bool abort) {
int device_id;
cudaGetDevice(&device_id);
if (code != cudaSuccess) {
cudaDeviceReset();
if (abort) {
spdlog::error("Device {} GPUassert({}): {} {} {}", device_id, code, cudaGetErrorString(code), file, line);
exit(code);
}
}
return code;
}
#ifndef MAXSOLS
#define MAXSOLS 4
#endif
#ifndef XBITS
#define XBITS 6
#endif
const uint32_t NX = 1 << XBITS;
const uint32_t NX2 = NX * NX;
const uint32_t XMASK = NX - 1;
const uint32_t YBITS = XBITS;
const uint32_t NY = 1 << YBITS;
const uint32_t YZBITS = EDGEBITS - XBITS;
const uint32_t ZBITS = YZBITS - YBITS;
const uint32_t NZ = 1 << ZBITS;
const uint32_t ZMASK = NZ - 1;
#ifndef IDXSHIFT
// number of bits of compression of surviving edge endpoints
// reduces space used in cycle finding, but too high a value
// results in NODE OVERFLOW warnings and fake cycles
#define IDXSHIFT 12
#endif
const uint32_t MAXEDGES = NEDGES >> IDXSHIFT;
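// Illustration of the shift above (EDGEBITS/NEDGES come from the included headers; assuming
// EDGEBITS = 29, i.e. cuckaroo29): NEDGES = 1 << 29, so MAXEDGES = NEDGES >> 12 = 131072,
// the cap on edges handed to the CPU cycle finder in SolverCtx::solve().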
#ifndef NEPS_A
#define NEPS_A 133
#endif
#ifndef NEPS_B
#define NEPS_B 88
#endif
#define NEPS 128
// Number of Parts of BufferB, all but one of which will overlap BufferA
#ifndef NB
#define NB 2
#endif
#ifndef NA
#define NA ((NB * NEPS_A + NEPS_B - 1) / NEPS_B)
#endif
const uint32_t EDGES_A = NZ * NEPS_A / NEPS;
const uint32_t EDGES_B = NZ * NEPS_B / NEPS;
const uint32_t ROW_EDGES_A = EDGES_A * NY;
const uint32_t ROW_EDGES_B = EDGES_B * NY;
__constant__ uint2 recoveredges[MAXCYCLELEN];
__constant__ uint2 e0 = {0, 0};
__device__ uint64_t dipblock(const siphash_keys& keys, const word_t edge, uint64_t* buf) {
diphash_state<> shs(keys);
word_t edge0 = edge & ~EDGE_BLOCK_MASK;
uint32_t i;
for (i = 0; i < EDGE_BLOCK_MASK; i++) {
shs.hash24(edge0 + i);
buf[i] = shs.xor_lanes();
}
shs.hash24(edge0 + i);
buf[i] = 0;
return shs.xor_lanes();
}
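// Note on dipblock() (matches how SeedA/Recovery use it below): buf[i] holds the rolling lane-XOR
// after absorbing edge edge0+i and the return value is the final one, so callers recover edge e's
// hash as buf[e] ^ last; the last slot is zeroed so that entry yields 'last' itself.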
__device__ uint32_t endpoint(uint2 nodes, int uorv) {
return uorv ? nodes.y : nodes.x;
}
#ifndef FLUSHA // should perhaps be in trimparams and passed as template
// parameter
#define FLUSHA 16
#endif
template <int maxOut>
__global__ void SeedA(const siphash_keys& sipkeys, ulonglong4* __restrict__ buffer, uint32_t* __restrict__ indexes) {
const int group = blockIdx.x;
const int dim = blockDim.x;
const int lid = threadIdx.x;
const int gid = group * dim + lid;
const int nthreads = gridDim.x * dim;
const int FLUSHA2 = 2 * FLUSHA;
__shared__ uint2 tmp[NX][FLUSHA2]; // needs to be ulonglong4 aligned
const int TMPPERLL4 = sizeof(ulonglong4) / sizeof(uint2);
__shared__ int counters[NX];
uint64_t buf[EDGE_BLOCK_SIZE];
for (int row = lid; row < NX; row += dim)
counters[row] = 0;
__syncthreads();
const int col = group % NX;
const int loops = NEDGES / nthreads; // assuming THREADS_HAVE_EDGES checked
for (int blk = 0; blk < loops; blk += EDGE_BLOCK_SIZE) {
uint32_t nonce0 = gid * loops + blk;
const uint64_t last = dipblock(sipkeys, nonce0, buf);
for (uint32_t e = 0; e < EDGE_BLOCK_SIZE; e++) {
uint64_t edge = buf[e] ^ last;
uint32_t node0 = edge & EDGEMASK;
uint32_t node1 = (edge >> 32) & EDGEMASK;
int row = node0 >> YZBITS;
int counter = min((int) atomicAdd(counters + row, 1),
(int) (FLUSHA2 - 1)); // assuming ROWS_LIMIT_LOSSES checked
tmp[row][counter] = make_uint2(node0, node1);
__syncthreads();
if (counter == FLUSHA - 1) {
int localIdx = min(FLUSHA2, counters[row]);
int newCount = localIdx % FLUSHA;
int nflush = localIdx - newCount;
uint32_t grp = row * NX + col;
int cnt = min((int) atomicAdd(indexes + grp, nflush), (int) (maxOut - nflush));
for (int i = 0; i < nflush; i += TMPPERLL4)
buffer[((uint64_t) grp * maxOut + cnt + i) / TMPPERLL4] = *(ulonglong4*) (&tmp[row][i]);
for (int t = 0; t < newCount; t++) {
tmp[row][t] = tmp[row][t + nflush];
}
counters[row] = newCount;
}
__syncthreads();
}
}
uint2 zero = make_uint2(0, 0);
for (int row = lid; row < NX; row += dim) {
int localIdx = min(FLUSHA2, counters[row]);
uint32_t grp = row * NX + col;
for (int j = localIdx; j % TMPPERLL4; j++)
tmp[row][j] = zero;
for (int i = 0; i < localIdx; i += TMPPERLL4) {
int cnt = min((int) atomicAdd(indexes + grp, TMPPERLL4), (int) (maxOut - TMPPERLL4));
buffer[((uint64_t) grp * maxOut + cnt) / TMPPERLL4] = *(ulonglong4*) (&tmp[row][i]);
}
}
}
template <typename Edge>
__device__ bool null(Edge e);
__device__ bool null(uint32_t nonce) {
return nonce == 0;
}
__device__ bool null(uint2 nodes) {
return nodes.x == 0 && nodes.y == 0;
}
#ifndef FLUSHB
#define FLUSHB 8
#endif
template <typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
return __ldg(ptr);
#else
return *ptr;
#endif
}
template <int maxOut>
__global__ void SeedB(const uint2* __restrict__ source,
ulonglong4* __restrict__ destination,
const uint32_t* __restrict__ srcIdx,
uint32_t* __restrict__ dstIdx) {
const int group = blockIdx.x;
const int dim = blockDim.x;
const int lid = threadIdx.x;
const int FLUSHB2 = 2 * FLUSHB;
__shared__ uint2 tmp[NX][FLUSHB2];
const int TMPPERLL4 = sizeof(ulonglong4) / sizeof(uint2);
__shared__ int counters[NX];
for (int col = lid; col < NX; col += dim)
counters[col] = 0;
__syncthreads();
const int row = group / NX;
const int bucketEdges = min((int) srcIdx[group], (int) maxOut);
const int loops = (bucketEdges + dim - 1) / dim;
for (int loop = 0; loop < loops; loop++) {
int col;
int counter = 0;
const int edgeIndex = loop * dim + lid;
if (edgeIndex < bucketEdges) {
const int index = group * maxOut + edgeIndex;
uint2 edge = ldg(&source[index]);
if (!null(edge)) {
uint32_t node1 = edge.x;
col = (node1 >> ZBITS) & XMASK;
counter = min((int) atomicAdd(counters + col, 1),
(int) (FLUSHB2 - 1)); // assuming COLS_LIMIT_LOSSES checked
tmp[col][counter] = edge;
}
}
__syncthreads();
if (counter == FLUSHB - 1) {
int localIdx = min(FLUSHB2, counters[col]);
int newCount = localIdx % FLUSHB;
int nflush = localIdx - newCount;
uint32_t grp = row * NX + col;
int cnt = min((int) atomicAdd(dstIdx + grp, nflush), (int) (maxOut - nflush));
for (int i = 0; i < nflush; i += TMPPERLL4)
destination[((uint64_t) grp * maxOut + cnt + i) / TMPPERLL4] = *(ulonglong4*) (&tmp[col][i]);
for (int t = 0; t < newCount; t++) {
tmp[col][t] = tmp[col][t + nflush];
}
counters[col] = newCount;
}
__syncthreads();
}
uint2 zero = make_uint2(0, 0);
for (int col = lid; col < NX; col += dim) {
int localIdx = min(FLUSHB2, counters[col]);
uint32_t grp = row * NX + col;
for (int j = localIdx; j % TMPPERLL4; j++)
tmp[col][j] = zero;
for (int i = 0; i < localIdx; i += TMPPERLL4) {
int cnt = min((int) atomicAdd(dstIdx + grp, TMPPERLL4), (int) (maxOut - TMPPERLL4));
destination[((uint64_t) grp * maxOut + cnt) / TMPPERLL4] = *(ulonglong4*) (&tmp[col][i]);
}
}
}
__device__ __forceinline__ void Increase2bCounter(uint32_t* ecounters, const int bucket) {
int word = bucket >> 5;
unsigned char bit = bucket & 0x1F;
uint32_t mask = 1 << bit;
uint32_t old = atomicOr(ecounters + word, mask) & mask;
if (old)
atomicOr(ecounters + word + NZ / 32, mask);
}
__device__ __forceinline__ bool Read2bCounter(uint32_t* ecounters, const int bucket) {
int word = bucket >> 5;
unsigned char bit = bucket & 0x1F;
return (ecounters[word + NZ / 32] >> bit) & 1;
}
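// How the two helpers above pack 2-bit saturating counters (the layout Round relies on):
// ecounters[0 .. NZ/32-1] hold the "seen at least once" bits and ecounters[NZ/32 .. NZ/16-1]
// the "seen at least twice" bits. For example, bucket = 37 gives word = 1, bit = 5: the first
// Increase2bCounter(ecounters, 37) sets bit 5 of ecounters[1]; a second call for the same bucket
// also sets bit 5 of ecounters[1 + NZ/32], which is the bit Read2bCounter tests.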
template <int NP, int maxIn, int maxOut>
__global__ void Round(const int round,
const uint2* __restrict__ src,
uint2* __restrict__ dst,
const uint32_t* __restrict__ srcIdx,
uint32_t* __restrict__ dstIdx) {
const int group = blockIdx.x;
const int dim = blockDim.x;
const int lid = threadIdx.x;
const int COUNTERWORDS = NZ / 16; // 16 2-bit counters per 32-bit word
__shared__ uint32_t ecounters[COUNTERWORDS];
for (int i = lid; i < COUNTERWORDS; i += dim)
ecounters[i] = 0;
__syncthreads();
for (int i = 0; i < NP; i++, src += NX2 * maxIn, srcIdx += NX2) {
const int edgesInBucket = min(srcIdx[group], maxIn);
const int loops = (edgesInBucket + dim - 1) / dim;
for (int loop = 0; loop < loops; loop++) {
const int lindex = loop * dim + lid;
if (lindex < edgesInBucket) {
const int index = maxIn * group + lindex;
uint2 edge = ldg(&src[index]);
if (null(edge))
continue;
uint32_t node = endpoint(edge, round & 1);
Increase2bCounter(ecounters, node & ZMASK);
}
}
}
__syncthreads();
src -= NP * NX2 * maxIn;
srcIdx -= NP * NX2;
for (int i = 0; i < NP; i++, src += NX2 * maxIn, srcIdx += NX2) {
const int edgesInBucket = min(srcIdx[group], maxIn);
const int loops = (edgesInBucket + dim - 1) / dim;
for (int loop = 0; loop < loops; loop++) {
const int lindex = loop * dim + lid;
if (lindex < edgesInBucket) {
const int index = maxIn * group + lindex;
uint2 edge = ldg(&src[index]);
if (null(edge))
continue;
uint32_t node0 = endpoint(edge, round & 1);
if (Read2bCounter(ecounters, node0 & ZMASK)) {
uint32_t node1 = endpoint(edge, (round & 1) ^ 1);
const int bucket = node1 >> ZBITS;
const int bktIdx = min(atomicAdd(dstIdx + bucket, 1), maxOut - 1);
dst[bucket * maxOut + bktIdx] = (round & 1) ? make_uint2(node1, node0) : make_uint2(node0, node1);
}
}
}
}
}
template <int maxIn>
__global__ void Tail(const uint2* source, uint2* destination, const uint32_t* srcIdx, uint32_t* dstIdx) {
const int lid = threadIdx.x;
const int group = blockIdx.x;
const int dim = blockDim.x;
int myEdges = srcIdx[group];
__shared__ int destIdx;
if (lid == 0)
destIdx = atomicAdd(dstIdx, myEdges);
__syncthreads();
for (int i = lid; i < myEdges; i += dim)
destination[destIdx + lid] = source[group * maxIn + lid];
}
__global__ void Recovery(const siphash_keys& sipkeys, ulonglong4* buffer, int* indexes, int proofsize) {
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
const int lid = threadIdx.x;
const int nthreads = blockDim.x * gridDim.x;
const int loops = NEDGES / nthreads;
__shared__ uint32_t nonces[MAXCYCLELEN];
uint64_t buf[EDGE_BLOCK_SIZE];
if (lid < proofsize) {
nonces[lid] = 0;
}
__syncthreads();
for (int blk = 0; blk < loops; blk += EDGE_BLOCK_SIZE) {
uint32_t nonce0 = gid * loops + blk;
const uint64_t last = dipblock(sipkeys, nonce0, buf);
for (int i = 0; i < EDGE_BLOCK_SIZE; i++) {
uint64_t edge = buf[i] ^ last;
uint32_t u = edge & EDGEMASK;
uint32_t v = (edge >> 32) & EDGEMASK;
for (int p = 0; p < proofsize; p++) { // YO
if (recoveredges[p].x == u && recoveredges[p].y == v) {
nonces[p] = nonce0 + i;
}
}
}
}
__syncthreads();
if (lid < proofsize) {
if (nonces[lid] > 0) {
indexes[lid] = nonces[lid];
}
}
}
trimparams::trimparams() {
ntrims = 176;
genA.blocks = 4096;
genA.tpb = 256;
genB.blocks = NX2;
genB.tpb = 128;
trim.blocks = NX2;
trim.tpb = 512;
tail.blocks = NX2;
tail.tpb = 1024;
recover.blocks = 1024;
recover.tpb = 1024;
}
GEdgeTrimmer::GEdgeTrimmer(const trimparams _tp, int cyclelen)
: cycle_len(cyclelen), tp(_tp), indexesSize(NX * NY * sizeof(uint32_t)) /*, indexesE(new uint32_t*[1 + NB])*/ {
checkCudaErrors_V(cudaMalloc((void**) &dt, sizeof(GEdgeTrimmer)));
checkCudaErrors_V(cudaMalloc((void**) &uvnodes, cycle_len * 2 * sizeof(uint32_t)));
checkCudaErrors_V(cudaMalloc((void**) &dipkeys, sizeof(siphash_keys)));
for (int i = 0; i < 1 + NB; i++) {
checkCudaErrors_V(cudaMalloc((void**) &indexesE[i], indexesSize));
}
sizeA = ROW_EDGES_A * NX * sizeof(uint2);
sizeB = ROW_EDGES_B * NX * sizeof(uint2);
const size_t bufferSize = sizeA + sizeB / NB;
assert(bufferSize >= sizeB + sizeB / NB / 2); // ensure enough space for Round 1
checkCudaErrors_V(cudaMalloc((void**) &bufferA, bufferSize));
bufferAB = bufferA + sizeB / NB;
bufferB = bufferA + bufferSize - sizeB;
assert(bufferA + sizeA == bufferB + sizeB * (NB - 1) / NB); // ensure alignment of overlap
cudaMemcpy(dt, this, sizeof(GEdgeTrimmer), cudaMemcpyHostToDevice);
initsuccess = true;
}
uint64_t GEdgeTrimmer::globalbytes() const {
return (sizeA + sizeB / NB) + (1 + NB) * indexesSize + sizeof(siphash_keys) + cycle_len * 2 * sizeof(uint32_t) +
sizeof(GEdgeTrimmer);
}
GEdgeTrimmer::~GEdgeTrimmer() {
checkCudaErrors_V(cudaFree(bufferA));
for (int i = 0; i < 1 + NB; i++) {
checkCudaErrors_V(cudaFree(indexesE[i]));
}
/*delete[] indexesE;*/
checkCudaErrors_V(cudaFree(dipkeys));
checkCudaErrors_V(cudaFree(uvnodes));
checkCudaErrors_V(cudaFree(dt));
cudaDeviceReset();
}
uint32_t GEdgeTrimmer::trim() {
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
cudaMemcpy(dipkeys, &sipkeys, sizeof(sipkeys), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
float durationA, durationB;
cudaEventRecord(start, NULL);
cudaMemset(indexesE[1], 0, indexesSize);
SeedA<EDGES_A><<<tp.genA.blocks, tp.genA.tpb>>>(*dipkeys, (ulonglong4*) bufferAB, indexesE[1]);
checkCudaErrors(cudaDeviceSynchronize());
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&durationA, start, stop);
if (abort.load()) {
return false;
}
cudaEventRecord(start, NULL);
cudaMemset(indexesE[0], 0, indexesSize);
uint32_t qA = sizeA / NA;
uint32_t qE = NX2 / NA;
for (uint32_t i = 0; i < NA; i++) {
SeedB<EDGES_A><<<tp.genB.blocks / NA, tp.genB.tpb>>>(
(uint2*) (bufferAB + i * qA), (ulonglong4*) (bufferA + i * qA), indexesE[1] + i * qE, indexesE[0] + i * qE);
if (abort.load()) {
return false;
}
}
checkCudaErrors(cudaDeviceSynchronize());
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&durationB, start, stop);
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
spdlog::trace("Seeding completed in {} + {} ms", durationA, durationB);
if (abort.load()) {
return false;
}
for (uint32_t i = 0; i < NB; i++)
cudaMemset(indexesE[1 + i], 0, indexesSize);
qA = sizeA / NB;
const size_t qB = sizeB / NB;
qE = NX2 / NB;
for (uint32_t i = NB; i--;) {
Round<1, EDGES_A, EDGES_B / NB>
<<<tp.trim.blocks / NB, tp.trim.tpb>>>(0, (uint2*) (bufferA + i * qA), (uint2*) (bufferB + i * qB),
indexesE[0] + i * qE, indexesE[1 + i]); // to .632
if (abort.load()) {
return false;
}
}
cudaMemset(indexesE[0], 0, indexesSize);
Round<NB, EDGES_B / NB, EDGES_B / 2><<<tp.trim.blocks, tp.trim.tpb>>>(1, (const uint2*) bufferB, (uint2*) bufferA,
indexesE[1],
indexesE[0]); // to .296
if (abort.load()) {
return false;
}
cudaMemset(indexesE[1], 0, indexesSize);
Round<1, EDGES_B / 2, EDGES_A / 4><<<tp.trim.blocks, tp.trim.tpb>>>(2, (const uint2*) bufferA, (uint2*) bufferB,
indexesE[0],
indexesE[1]); // to .176
if (abort.load()) {
return false;
}
cudaMemset(indexesE[0], 0, indexesSize);
Round<1, EDGES_A / 4, EDGES_B / 4><<<tp.trim.blocks, tp.trim.tpb>>>(3, (const uint2*) bufferB, (uint2*) bufferA,
indexesE[1],
indexesE[0]); // to .117
if (abort.load()) {
return false;
}
cudaDeviceSynchronize();
for (int round = 4; round < tp.ntrims; round += 2) {
cudaMemset(indexesE[1], 0, indexesSize);
Round<1, EDGES_B / 4, EDGES_B / 4><<<tp.trim.blocks, tp.trim.tpb>>>(round, (const uint2*) bufferA,
(uint2*) bufferB, indexesE[0], indexesE[1]);
if (abort.load()) {
return false;
}
cudaMemset(indexesE[0], 0, indexesSize);
Round<1, EDGES_B / 4, EDGES_B / 4><<<tp.trim.blocks, tp.trim.tpb>>>(round + 1, (const uint2*) bufferB,
(uint2*) bufferA, indexesE[1], indexesE[0]);
if (abort.load()) {
return false;
}
}
cudaMemset(indexesE[1], 0, indexesSize);
cudaDeviceSynchronize();
Tail<EDGES_B / 4>
<<<tp.tail.blocks, tp.tail.tpb>>>((const uint2*) bufferA, (uint2*) bufferB, indexesE[0], indexesE[1]);
cudaMemcpy(&nedges, indexesE[1], sizeof(uint32_t), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
return nedges;
}
SolverCtx::SolverCtx(const trimparams& tp, int cyclelen)
: trimmer(tp, 0), cg(MAXEDGES, MAXEDGES, MAXSOLS, IDXSHIFT, cyclelen) {
cycle_len = cyclelen;
edges = new uint2[MAXEDGES];
soledges = new uint2[cycle_len];
}
int SolverCtx::findcycles(uint2* edges, uint32_t nedges) {
cg.reset();
for (uint32_t i = 0; i < nedges; i++) {
cg.add_compress_edge(edges[i].x, edges[i].y);
}
for (uint32_t s = 0; s < cg.nsols; s++) {
for (uint32_t j = 0; j < cycle_len; j++) {
soledges[j] = edges[cg.sols[s][j]];
}
sols.resize(sols.size() + cycle_len);
cudaMemcpyToSymbol(recoveredges, soledges, sizeof(uint2) * cycle_len);
cudaMemset(trimmer.indexesE[1], 0, trimmer.indexesSize);
Recovery<<<trimmer.tp.recover.blocks, trimmer.tp.recover.tpb>>>(*trimmer.dipkeys, (ulonglong4*) trimmer.bufferA,
(int*) trimmer.indexesE[1], cycle_len);
cudaMemcpy(&sols[sols.size() - cycle_len], trimmer.indexesE[1], cycle_len * sizeof(uint32_t),
cudaMemcpyDeviceToHost);
checkCudaErrors(cudaDeviceSynchronize());
qsort(&sols[sols.size() - cycle_len], cycle_len, sizeof(uint32_t), cg.nonce_cmp);
}
return 0;
}
int SolverCtx::solve() {
trimmer.abort = false;
uint32_t nedges = trimmer.trim();
if (!nedges)
return 0;
if (nedges > MAXEDGES) {
spdlog::trace("OOPS; losing {} edges beyond MAXEDGES={}", nedges - MAXEDGES, MAXEDGES);
nedges = MAXEDGES;
}
cudaMemcpy(edges, trimmer.bufferB, sizeof(uint2[nedges]), cudaMemcpyDeviceToHost);
findcycles(edges, nedges);
spdlog::trace("findcycles edges {}", nedges);
return sols.size() / cycle_len;
}
void FillDefaultGPUParams(SolverParams& params) {
trimparams tp;
params.device = 0;
params.ntrims = tp.ntrims;
params.genablocks = min(tp.genA.blocks, NEDGES / EDGE_BLOCK_SIZE / tp.genA.tpb);
params.genatpb = tp.genA.tpb;
params.genbtpb = tp.genB.tpb;
params.trimtpb = tp.trim.tpb;
params.tailtpb = tp.tail.tpb;
params.recoverblocks = min(tp.recover.blocks, NEDGES / EDGE_BLOCK_SIZE / tp.recover.tpb);
params.recovertpb = tp.recover.tpb;
params.cpuload = false;
}
SolverCtx* CreateSolverCtx(SolverParams& params, int cyclelen) {
trimparams tp;
tp.ntrims = params.ntrims;
tp.genA.blocks = params.genablocks;
tp.genA.tpb = params.genatpb;
tp.genB.tpb = params.genbtpb;
tp.trim.tpb = params.trimtpb;
tp.tail.tpb = params.tailtpb;
tp.recover.blocks = params.recoverblocks;
tp.recover.tpb = params.recovertpb;
cudaDeviceProp prop;
checkCudaErrors_N(cudaGetDeviceProperties(&prop, params.device));
assert(tp.genA.tpb <= prop.maxThreadsPerBlock);
assert(tp.genB.tpb <= prop.maxThreadsPerBlock);
assert(tp.trim.tpb <= prop.maxThreadsPerBlock);
// assert(tp.tailblocks <= prop.threadDims[0]);
assert(tp.tail.tpb <= prop.maxThreadsPerBlock);
assert(tp.recover.tpb <= prop.maxThreadsPerBlock);
assert(tp.genA.blocks * tp.genA.tpb * EDGE_BLOCK_SIZE <= NEDGES); // check THREADS_HAVE_EDGES
assert(tp.recover.blocks * tp.recover.tpb * EDGE_BLOCK_SIZE <= NEDGES); // check THREADS_HAVE_EDGES
assert(tp.genA.tpb / NX <= FLUSHA); // check ROWS_LIMIT_LOSSES
assert(tp.genB.tpb / NX <= FLUSHB); // check COLS_LIMIT_LOSSES
checkCudaErrors_N(cudaSetDevice(params.device));
if (!params.cpuload) {
checkCudaErrors_N(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
}
return new SolverCtx(tp, cyclelen);
}
|
a2769be7a61dfcae8a4493ef332344394bcd636b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MaskByNaN.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *inputImage = NULL;
hipMalloc(&inputImage, XSIZE*YSIZE);
float *mask = NULL;
hipMalloc(&mask, XSIZE*YSIZE);
float *outputImage = NULL;
hipMalloc(&outputImage, XSIZE*YSIZE);
int count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
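// The two while loops above simply round the problem size up to a multiple of the block size,
// e.g. XSIZE = 240 with BLOCKX = 32 gives iXSIZE = 256 and gridBlock.x = 256/32 = 8.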
hipFree(0);
hipLaunchKernelGGL(MaskByNaN, dim3(gridBlock), dim3(threadBlock), 0, 0, inputImage, mask, outputImage, count);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(MaskByNaN, dim3(gridBlock), dim3(threadBlock), 0, 0, inputImage, mask, outputImage, count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(MaskByNaN, dim3(gridBlock), dim3(threadBlock), 0, 0, inputImage, mask, outputImage, count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a2769be7a61dfcae8a4493ef332344394bcd636b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MaskByNaN.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *inputImage = NULL;
cudaMalloc(&inputImage, XSIZE*YSIZE);
float *mask = NULL;
cudaMalloc(&mask, XSIZE*YSIZE);
float *outputImage = NULL;
cudaMalloc(&outputImage, XSIZE*YSIZE);
int count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
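// The two while loops above simply round the problem size up to a multiple of the block size,
// e.g. XSIZE = 240 with BLOCKX = 32 gives iXSIZE = 256 and gridBlock.x = 256/32 = 8.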
cudaFree(0);
MaskByNaN<<<gridBlock,threadBlock>>>(inputImage,mask,outputImage,count);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
MaskByNaN<<<gridBlock,threadBlock>>>(inputImage,mask,outputImage,count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
MaskByNaN<<<gridBlock,threadBlock>>>(inputImage,mask,outputImage,count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
776cd0d88794fcb5d9193b56799e556135594ca2.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %cuda_args
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#define CHECK(cmd) \
{\
hipError_t error = cmd;\
if (error != hipSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", hipGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
/*
* Square each element in the array A and write to array C.
*/
template <typename T>
__global__ void
vector_square(T *C_d, const T *A_d, size_t N)
{
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
size_t stride = blockDim.x * gridDim.x;
for (size_t i=offset; i<N; i+=stride) {
C_d[i] = A_d[i] * A_d[i];
}
}
int main(int argc, char *argv[])
{
float *A_d, *C_d;
float *A_h, *C_h;
size_t N = 1000000;
size_t Nbytes = N * sizeof(float);
// CHECK: hipDeviceProp_t props;
hipDeviceProp_t props;
// CHECK: CHECK(hipGetDeviceProperties(&props, 0/*deviceID*/));
CHECK(hipGetDeviceProperties(&props, 0/*deviceID*/));
printf ("info: running on device %s\n", props.name);
printf ("info: allocate host mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
A_h = (float*)malloc(Nbytes);
// CHECK: CHECK(A_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
CHECK(A_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
C_h = (float*)malloc(Nbytes);
// CHECK: CHECK(C_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
CHECK(C_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
// Fill with Phi + i
for (size_t i=0; i<N; i++)
{
A_h[i] = 1.618f + i;
}
printf ("info: allocate device mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
// CHECK: CHECK(hipMalloc(&A_d, Nbytes));
// CHECK: CHECK(hipMalloc(&C_d, Nbytes));
CHECK(hipMalloc(&A_d, Nbytes));
CHECK(hipMalloc(&C_d, Nbytes));
printf ("info: copy Host2Device\n");
// CHECK: CHECK ( hipMemcpy(A_d, A_h, Nbytes, hipMemcpyHostToDevice));
CHECK ( hipMemcpy(A_d, A_h, Nbytes, hipMemcpyHostToDevice));
const unsigned blocks = 512;
const unsigned threadsPerBlock = 256;
printf ("info: launch 'vector_square' kernel\n");
// CHECK: hipLaunchKernelGGL(vector_square, dim3(blocks), dim3(threadsPerBlock), 0, 0, C_d, A_d, N);
hipLaunchKernelGGL(( vector_square) , dim3(blocks), dim3(threadsPerBlock), 0, 0, C_d, A_d, N);
printf ("info: copy Device2Host\n");
// CHECK: CHECK ( hipMemcpy(C_h, C_d, Nbytes, hipMemcpyDeviceToHost));
CHECK ( hipMemcpy(C_h, C_d, Nbytes, hipMemcpyDeviceToHost));
printf ("info: check result\n");
for (size_t i=0; i<N; i++) {
if (C_h[i] != A_h[i] * A_h[i]) {
// CHECK: CHECK(hipErrorUnknown);
CHECK(hipErrorUnknown);
}
}
printf ("PASSED!\n");
}
| 776cd0d88794fcb5d9193b56799e556135594ca2.cu | // RUN: %run_test hipify "%s" "%t" %cuda_args
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(cmd) \
{\
cudaError_t error = cmd;\
if (error != cudaSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", cudaGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
/*
* Square each element in the array A and write to array C.
*/
template <typename T>
__global__ void
vector_square(T *C_d, const T *A_d, size_t N)
{
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
size_t stride = blockDim.x * gridDim.x;
for (size_t i=offset; i<N; i+=stride) {
C_d[i] = A_d[i] * A_d[i];
}
}
int main(int argc, char *argv[])
{
float *A_d, *C_d;
float *A_h, *C_h;
size_t N = 1000000;
size_t Nbytes = N * sizeof(float);
// CHECK: hipDeviceProp_t props;
cudaDeviceProp props;
// CHECK: CHECK(hipGetDeviceProperties(&props, 0/*deviceID*/));
CHECK(cudaGetDeviceProperties(&props, 0/*deviceID*/));
printf ("info: running on device %s\n", props.name);
printf ("info: allocate host mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
A_h = (float*)malloc(Nbytes);
// CHECK: CHECK(A_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
CHECK(A_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
C_h = (float*)malloc(Nbytes);
// CHECK: CHECK(C_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
CHECK(C_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
// Fill with Phi + i
for (size_t i=0; i<N; i++)
{
A_h[i] = 1.618f + i;
}
printf ("info: allocate device mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
// CHECK: CHECK(hipMalloc(&A_d, Nbytes));
// CHECK: CHECK(hipMalloc(&C_d, Nbytes));
CHECK(cudaMalloc(&A_d, Nbytes));
CHECK(cudaMalloc(&C_d, Nbytes));
printf ("info: copy Host2Device\n");
// CHECK: CHECK ( hipMemcpy(A_d, A_h, Nbytes, hipMemcpyHostToDevice));
CHECK ( cudaMemcpy(A_d, A_h, Nbytes, cudaMemcpyHostToDevice));
const unsigned blocks = 512;
const unsigned threadsPerBlock = 256;
printf ("info: launch 'vector_square' kernel\n");
// CHECK: hipLaunchKernelGGL(vector_square, dim3(blocks), dim3(threadsPerBlock), 0, 0, C_d, A_d, N);
vector_square <<<blocks, threadsPerBlock>>> (C_d, A_d, N);
printf ("info: copy Device2Host\n");
// CHECK: CHECK ( hipMemcpy(C_h, C_d, Nbytes, hipMemcpyDeviceToHost));
CHECK ( cudaMemcpy(C_h, C_d, Nbytes, cudaMemcpyDeviceToHost));
printf ("info: check result\n");
for (size_t i=0; i<N; i++) {
if (C_h[i] != A_h[i] * A_h[i]) {
// CHECK: CHECK(hipErrorUnknown);
CHECK(cudaErrorUnknown);
}
}
printf ("PASSED!\n");
}
|
61b4e66ee96516ff3295dc5ff7c073363db6706c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <pqxx/pqxx>
#include <string>
#include <vector>
using namespace pqxx;
using namespace std;
result giveResult(connection &C, string sql)
{
nontransaction N(C);
result R(N.exec( sql ));
return R;
}
int main()
{
string sql;
vector<string> Schema;
int v1;
int v2;
cout << "Hello World!" << endl;
try{
connection C("dbname=name user=user password=password hostaddr=ipaddress port=5432");
if (C.is_open())
{
cout << "Database " << C.dbname() << " opened" << endl;
}
else
{
cout << "Cannot Open Database " << endl;
return 1;
}
sql = "SELECT * "\
" FROM firing_times_test0 LIMIT 1;";
//nontransaction N(C);
//result R( N.exec( sql ));
result R = giveResult(C, sql);
/*
for (result::const_iterator c = R.begin(); c != R.end(); ++c)
{
std::cout << (*c).size() << std::endl;
c[0].to(v1);
c[1].to(v2);
cout << typeid(v1).name() << ":" << v1 << "," << typeid(v2).name() << ":" << v2 << endl;
}
*/
result::const_iterator c = R.begin();
cout << c[0] << "," << c[1] << endl;
C.disconnect();
}
catch(const exception &e)
{
cerr << e.what() << endl;
return 1;
}
return 0;
}
| 61b4e66ee96516ff3295dc5ff7c073363db6706c.cu | #include <iostream>
#include <cuda.h>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include <pqxx/pqxx>
#include <string>
#include <vector>
using namespace pqxx;
using namespace std;
result giveResult(connection &C, string sql)
{
nontransaction N(C);
result R(N.exec( sql ));
return R;
}
int main()
{
string sql;
vector<string> Schema;
int v1;
int v2;
cout << "Hello World!" << endl;
try{
connection C("dbname=name user=user password=password hostaddr=ipaddress port=5432");
if (C.is_open())
{
cout << "Database " << C.dbname() << " opened" << endl;
}
else
{
cout << "Cannot Open Database " << endl;
return 1;
}
sql = "SELECT * "\
" FROM firing_times_test0 LIMIT 1;";
//nontransaction N(C);
//result R( N.exec( sql ));
result R = giveResult(C, sql);
/*
for (result::const_iterator c = R.begin(); c != R.end(); ++c)
{
std::cout << (*c).size() << std::endl;
c[0].to(v1);
c[1].to(v2);
cout << typeid(v1).name() << ":" << v1 << "," << typeid(v2).name() << ":" << v2 << endl;
}
*/
result::const_iterator c = R.begin();
cout << c[0] << "," << c[1] << endl;
C.disconnect();
}
catch(const exception &e)
{
cerr << e.what() << endl;
return 1;
}
return 0;
}
|
d869e35b88db8f4e9ea2688383494baee549a40d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define WIDTH 1024
#define THREADSPERBLOCK 32
#define BLOCKSPERGRID (WIDTH + THREADSPERBLOCK - 1) / THREADSPERBLOCK
int M[WIDTH][WIDTH] = {0};
int N[WIDTH][WIDTH] = {0};
int P[WIDTH][WIDTH] = {0};
int MxN[WIDTH][WIDTH] = {0};
__global__ void mat_mul(int *Md, int *Nd, int *Pd);
__global__ void transposeNaive(int *Nd, int *Td);
int main(int argc, char *argv[])
{
float elapsedTime1;
float elapsedTime2;
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
M[i][j] = (int)(rand() % 255 + 1);
N[i][j] = (int)(rand() % 255 + 1);
}
}
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < WIDTH; ++k)
{
MxN[i][j] += M[i][k] * N[k][j];
}
}
}
gettimeofday(&endtime, NULL);
double executime;
executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
printf("CPU time: %13lf msec\n", executime);
// Original
size_t size = WIDTH * WIDTH * sizeof(int);
int *Md, *Nd, *Pd, *Td;
hipMalloc((void **)&Md, size);
hipMemcpy(Md, M, size, hipMemcpyHostToDevice);
hipMalloc((void **)&Nd, size);
hipMemcpy(Nd, N, size, hipMemcpyHostToDevice);
hipMalloc((void **)&Td, size);
hipMalloc((void **)&Pd, size);
dim3 dimGrid(BLOCKSPERGRID, BLOCKSPERGRID);
dim3 dimBlock(THREADSPERBLOCK, THREADSPERBLOCK);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( transposeNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, Nd, Td);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime1, start, stop);
printf("GPU transpose time: %13f msec\n", elapsedTime1);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( mat_mul), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Td, Pd);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime2, start, stop);
printf("GPU time: %13f msec\n", elapsedTime2);
printf("GPU total time: %13f msec\n", elapsedTime1 + elapsedTime2);
hipError_t cuda_err = hipGetLastError();
if (hipSuccess != cuda_err)
{
printf("before kernel call: error = %s\n", hipGetErrorString(cuda_err));
exit(1);
}
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);
int pass = 1;
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
if (MxN[i][j] != P[i][j])
{
printf("MxN[%d][%d] = %d P[%d][%d] = %d\n", i, j, MxN[i][j], i, j, P[i][j]);
pass = 0;
break;
}
}
}
printf("Test %s\n", (pass) ? "PASSED" : "FAILED");
hipFree(Md);
hipFree(Nd);
hipFree(Td);
hipFree(Pd);
return 0;
}
__global__ void transposeNaive(int *Nd, int *Td)
{
int x, y;
int tx = threadIdx.x;
int ty = blockDim.x * threadIdx.y;
int bx = blockDim.x * blockDim.y * blockIdx.x;
int by = gridDim.x * (blockDim.x * blockDim.y) * blockIdx.y;
int tid = bx + by + tx + ty;
while (tid < WIDTH * WIDTH)
{
x = tid % WIDTH;
y = tid / WIDTH;
Td[x * WIDTH + y] = Nd[y * WIDTH + x];
tid = tid + gridDim.x * gridDim.y * blockDim.x * blockDim.y;
}
}
__global__ void mat_mul(int *Md, int *Nd, int *Pd)
{
int x, y;
int Pvalue;
int tx = threadIdx.x;
int ty = blockDim.x * threadIdx.y;
int bx = blockDim.x * blockDim.y * blockIdx.x;
int by = gridDim.x * (blockDim.x * blockDim.y) * blockIdx.y;
int tid = bx + by + tx + ty;
while (tid < WIDTH * WIDTH)
{
x = tid % WIDTH;
y = tid / WIDTH;
Pvalue = 0;
for (int k = 0; k < WIDTH; ++k)
{
int Melement = *(Md + y * WIDTH + k);
int Nelement = *(Nd + x * WIDTH + k);
Pvalue += Melement * Nelement;
}
*(Pd + y * WIDTH + x) = Pvalue;
tid = tid + gridDim.x * gridDim.y * blockDim.x * blockDim.y;
}
}
| d869e35b88db8f4e9ea2688383494baee549a40d.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define WIDTH 1024
#define THREADSPERBLOCK 32
#define BLOCKSPERGRID (WIDTH + THREADSPERBLOCK - 1) / THREADSPERBLOCK
int M[WIDTH][WIDTH] = {0};
int N[WIDTH][WIDTH] = {0};
int P[WIDTH][WIDTH] = {0};
int MxN[WIDTH][WIDTH] = {0};
__global__ void mat_mul(int *Md, int *Nd, int *Pd);
__global__ void transposeNaive(int *Nd, int *Td);
int main(int argc, char *argv[])
{
float elapsedTime1;
float elapsedTime2;
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
M[i][j] = (int)(rand() % 255 + 1);
N[i][j] = (int)(rand() % 255 + 1);
}
}
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < WIDTH; ++k)
{
MxN[i][j] += M[i][k] * N[k][j];
}
}
}
gettimeofday(&endtime, NULL);
double executime;
executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
printf("CPU time: %13lf msec\n", executime);
// Original
size_t size = WIDTH * WIDTH * sizeof(int);
int *Md, *Nd, *Pd, *Td;
cudaMalloc((void **)&Md, size);
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&Nd, size);
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&Td, size);
cudaMalloc((void **)&Pd, size);
dim3 dimGrid(BLOCKSPERGRID, BLOCKSPERGRID);
dim3 dimBlock(THREADSPERBLOCK, THREADSPERBLOCK);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
transposeNaive<<<dimGrid, dimBlock>>>(Nd, Td);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime1, start, stop);
printf("GPU transpose time: %13f msec\n", elapsedTime1);
cudaEventRecord(start, 0);
mat_mul<<<dimGrid, dimBlock>>>(Md, Td, Pd);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime2, start, stop);
printf("GPU time: %13f msec\n", elapsedTime2);
printf("GPU total time: %13f msec\n", elapsedTime1 + elapsedTime2);
cudaError_t cuda_err = cudaGetLastError();
if (cudaSuccess != cuda_err)
{
printf("before kernel call: error = %s\n", cudaGetErrorString(cuda_err));
exit(1);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
int pass = 1;
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
if (MxN[i][j] != P[i][j])
{
printf("MxN[%d][%d] = %d P[%d][%d] = %d\n", i, j, MxN[i][j], i, j, P[i][j]);
pass = 0;
break;
}
}
}
printf("Test %s\n", (pass) ? "PASSED" : "FAILED");
cudaFree(Md);
cudaFree(Nd);
cudaFree(Td);
cudaFree(Pd);
return 0;
}
__global__ void transposeNaive(int *Nd, int *Td)
{
int x, y;
int tx = threadIdx.x;
int ty = blockDim.x * threadIdx.y;
int bx = blockDim.x * blockDim.y * blockIdx.x;
int by = gridDim.x * (blockDim.x * blockDim.y) * blockIdx.y;
int tid = bx + by + tx + ty;
while (tid < WIDTH * WIDTH)
{
x = tid % WIDTH;
y = tid / WIDTH;
Td[x * WIDTH + y] = Nd[y * WIDTH + x];
tid = tid + gridDim.x * gridDim.y * blockDim.x * blockDim.y;
}
}
__global__ void mat_mul(int *Md, int *Nd, int *Pd)
{
int x, y;
int Pvalue;
int tx = threadIdx.x;
int ty = blockDim.x * threadIdx.y;
int bx = blockDim.x * blockDim.y * blockIdx.x;
int by = gridDim.x * (blockDim.x * blockDim.y) * blockIdx.y;
int tid = bx + by + tx + ty;
while (tid < WIDTH * WIDTH)
{
x = tid % WIDTH;
y = tid / WIDTH;
Pvalue = 0;
for (int k = 0; k < WIDTH; ++k)
{
int Melement = *(Md + y * WIDTH + k);
int Nelement = *(Nd + x * WIDTH + k);
Pvalue += Melement * Nelement;
}
*(Pd + y * WIDTH + x) = Pvalue;
tid = tid + gridDim.x * gridDim.y * blockDim.x * blockDim.y;
}
}
|
e22609c3ffd23fbeb31c06ee6fa1830471a11957.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "srad.h"
// includes, project
#ifdef __CUDA_CC__
#include "hip/hip_runtime.h"
#endif
// includes, kernels
#include "srad_kernel.hip"
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float *dN,*dS,*dW,*dE;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
float *J_cuda;
float *C_cuda;
float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 9)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
}
else{
usage(argc, argv);
}
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
iN = (int *)malloc(sizeof(unsigned int*) * rows) ;
iS = (int *)malloc(sizeof(unsigned int*) * rows) ;
jW = (int *)malloc(sizeof(unsigned int*) * cols) ;
jE = (int *)malloc(sizeof(unsigned int*) * cols) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
hipMalloc((void**)& J_cuda, sizeof(float)* size_I);
hipMalloc((void**)& C_cuda, sizeof(float)* size_I);
hipMalloc((void**)& E_C, sizeof(float)* size_I);
hipMalloc((void**)& W_C, sizeof(float)* size_I);
hipMalloc((void**)& S_C, sizeof(float)* size_I);
hipMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
// directional derivates
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
// diffusion coefficent (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
// saturate diffusion coefficent
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
// diffusion coefficent
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
//Currently the input size must be divided by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
//Copy data from main memory to device memory
hipMemcpy(J_cuda, J, sizeof(float) * size_I, hipMemcpyHostToDevice);
//Run kernels
hipLaunchKernelGGL(srad_cuda_1, dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr);
hipLaunchKernelGGL(srad_cuda_2, dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr);
//Copy data from device memory to main memory
hipMemcpy(J, J_cuda, sizeof(float) * size_I, hipMemcpyDeviceToHost);
#endif
}
hipDeviceSynchronize();
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
free(dN); free(dS); free(dW); free(dE);
#endif
#ifdef GPU
hipFree(C_cuda);
hipFree(J_cuda);
hipFree(E_C);
hipFree(W_C);
hipFree(N_C);
hipFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
| e22609c3ffd23fbeb31c06ee6fa1830471a11957.cu | #include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "srad.h"
// includes, project
#ifdef __CUDA_CC__
#include "cuda.h"
#endif
// includes, kernels
#include "srad_kernel.cu"
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float *dN,*dS,*dW,*dE;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
float *J_cuda;
float *C_cuda;
float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 9)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
}
else{
usage(argc, argv);
}
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
iN = (int *)malloc(sizeof(unsigned int*) * rows) ;
iS = (int *)malloc(sizeof(unsigned int*) * rows) ;
jW = (int *)malloc(sizeof(unsigned int*) * cols) ;
jE = (int *)malloc(sizeof(unsigned int*) * cols) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
hipMalloc((void**)& J_cuda, sizeof(float)* size_I);
hipMalloc((void**)& C_cuda, sizeof(float)* size_I);
hipMalloc((void**)& E_C, sizeof(float)* size_I);
hipMalloc((void**)& W_C, sizeof(float)* size_I);
hipMalloc((void**)& S_C, sizeof(float)* size_I);
hipMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
// directional derivates
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
// diffusion coefficent (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
// saturate diffusion coefficent
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
// diffusion coefficent
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
//Currently the input size must be divided by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
//Copy data from main memory to device memory
hipMemcpy(J_cuda, J, sizeof(float) * size_I, hipMemcpyHostToDevice);
//Run kernels
hipLaunchKernelGGL(srad_cuda_1, dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr);
hipLaunchKernelGGL(srad_cuda_2, dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr);
//Copy data from device memory to main memory
hipMemcpy(J, J_cuda, sizeof(float) * size_I, hipMemcpyDeviceToHost);
#endif
}
hipDeviceSynchronize();
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
free(dN); free(dS); free(dW); free(dE);
#endif
#ifdef GPU
hipFree(C_cuda);
hipFree(J_cuda);
hipFree(E_C);
hipFree(W_C);
hipFree(N_C);
hipFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
|
58dba1deaf6cd628f7bbbe7ae0cf9d76362b1165.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <string>
#include <limits>
#include <unistd.h>
#include <omp.h>
#include <cmath>
#include <chrono>
#define TIMEOUT_TIME 5
double Unext(double* u, int i, int j, double hxs, double hys, int linesize){
double hyd=1/(hxs*hxs);
double hxd=1/(hys*hys);
return ((u[(i+1)*linesize+j]+u[(i-1)*linesize+j])*hxd+(u[i*linesize+j+1]+u[i*linesize+j-1])*hyd) / (2*(hxd+hyd));
}
double Calc_local_eps(double* u,double* uprev, int bxs,int bys){
int i,j;
double maxe=0;
int linesize=bxs+2;
#pragma omp parallel for private(j) reduction (max: maxe)
for(i=1;i<bys+1;++i){
for(j=1;j<bxs+1;++j){
maxe = ::max(maxe,std::abs(u[i*linesize+j]-uprev[i*linesize+j]));
}
}
return maxe;
}
int main(int argc, char* argv[]) {
int pxs,pys;
int bxs,bys;
std::string out_str("out.txt");
int str_len;
double mineps,leps;
double lx,ly;
double ul,ur,uu, ud;
double uzero;
MPI_Init(&argc, &argv);
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
int i,j, wrank, wnprocs;
MPI_Comm_size(MPI_COMM_WORLD, &wnprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
if(wrank==0){
std::cin>>pxs>>pys;
std::cin>>bxs>>bys;
std::cin>>out_str;
std::cin>>mineps;
std::cin>>lx>>ly;
std::cin>>ul>>ur>>uu>>ud;
std::cin>>uzero;
str_len=out_str.length();
}
MPI_Bcast(&pxs,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&pys,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&bxs,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&bys,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&str_len,1,MPI_INT,0,MPI_COMM_WORLD);
out_str.resize(str_len);
MPI_Bcast((void*)out_str.data(),str_len,MPI_CHAR,0,MPI_COMM_WORLD);
MPI_Bcast(&mineps,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ul,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ur,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&uu,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ud,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&uzero,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
if(wrank==1){
fprintf(stderr,"A %d (%d):pxs:%d pys:%d bxs:%d bys:%d lx:%lf ly:%lf ul:%lf ur:%lf uu:%lf ud:%lf uzero:%lf mineps:%lf %s\n",wrank, wnprocs , pxs,pys,bxs,bys,lx,ly,ul,ur,uu,ud,uzero,mineps, out_str.c_str());
}
int n=(bxs+2)*(bys+2);
double hxs=lx/(bxs*pxs);
double hys=ly/(bys*pys);
int linesize=bxs+2;
int color = wrank<pys*pxs ? 0: MPI_UNDEFINED;
MPI_Comm my_comm;
MPI_Comm_split(MPI_COMM_WORLD, color, wrank, &my_comm);
if(color==0){
char *buf, *buf_p;
int bsize;
double* dataswap = (double*)malloc(sizeof(double) * n);
double* data = (double*)malloc(sizeof(double) * n);
int rank, nprocs;
MPI_Comm_size(my_comm, &nprocs);
MPI_Comm_rank(my_comm, &rank);
MPI_Datatype vertvect;
MPI_Type_vector(bys, 1, bxs+2, MPI_DOUBLE, &vertvect);
MPI_Type_commit(&vertvect);
//Init
int x=rank%pxs,y=rank/pxs;
int up,down,left,right;
right = (x+1== pxs)? -1 : y*pxs + (x + 1) % pxs;
left = (x-1==-1) ? -1 : y*pxs + (x - 1) % pxs;
up= (y-1==-1) ? -1 :((y-1)*pxs) + x;
down = (y+1==pys)? -1: ((y+1)*pxs) + x;
#pragma omp parallel for
for(i = 0; i < n; i++){
data[i] = uzero;//can break? cause int init outside?
}
//Init buf
{
int lrs, uds;
MPI_Pack_size(bys,MPI_DOUBLE,my_comm,&lrs);
MPI_Pack_size(bxs,MPI_DOUBLE,my_comm,&uds);
int bufsize = 4*MPI_BSEND_OVERHEAD + lrs + uds;
buf = (char *)malloc( bufsize );
MPI_Buffer_attach(buf, bufsize);
}
//Init margins
if(down==-1){
for(i = (bys+1)*(bxs+2); i < (bys+2)*(bxs+2); ++i){
data[i] = ud;
dataswap[i]=ud;
}
}
if(up==-1){
for(i = 0; i < bxs+2; ++i){
data[i] = uu;
dataswap[i]=uu;
}
}
if(left==-1){
for(i = 0; i < bys+2; ++i){
data[i*(bxs+2)] = ul;
dataswap[i*(bxs+2)] = ul;
}
}
if(right==-1){
for(i = 0; i < bys+2; ++i){
data[(bxs+1)+i*(bxs+2)] = ur;
dataswap[(bxs+1)+i*(bxs+2)] = ur;
}
}
//fprintf(stderr, "%d: %d %d %d %d \n", rank, left , right, up , down);
//main
if(rank==0)
fprintf(stderr, "pxs %d,pys %d,bxs %d,bys %d,mineps %lf,lx %lf,ly %lf,ul %lf,ur %lf, uu %lf , ud %lf , uzero %lf, out_str: %s \n", pxs,pys,bxs,bys,mineps,lx,ly,ul,ur,uu,ud,uzero,out_str.c_str());
int count=0;
fprintf(stderr, "%d %d %d %d\n",left, right, down, up);
while(count<100000){
MPI_Request req[4];
MPI_Status statuses[4];
//main
#pragma omp parallel for private(j)
for(i=1;i<bys+1;++i){
for(j=1;j<bxs+1;++j){
dataswap[i*linesize+j]=Unext(data,i,j,hxs,hys,linesize);
}
}
std::swap(data,dataswap);
count++;
//calc eps
leps=Calc_local_eps(data,dataswap,bxs,bys);
//fprintf(stderr, "%d,%d: after leps\n", rank,count);
//reduce eps
MPI_Allreduce(MPI_IN_PLACE,&leps,1,MPI_DOUBLE,MPI_MAX,my_comm);
if(rank==0){
//fprintf(stderr, "leps: %lf\n",leps);
}
if(leps<mineps){
break;
}
//fprintf(stderr, "%d,%d: after allred\n", rank,count);
//!done
//recv/send from up to down
int req_count=0;
if(left!=-1){
MPI_Bsend((void*)(data+(bxs+2)+1), 1, vertvect, left, 0, my_comm);
MPI_Irecv((void*)(data+(bxs+2)), 1, vertvect,left,0,my_comm,&req[req_count]);
++req_count;
}
if(right!=-1){
MPI_Bsend(data+(bxs+2)*2-2, 1, vertvect, right, 0, my_comm);
MPI_Irecv(data+(bxs+2)*2-1, 1, vertvect, right, 0, my_comm,&req[req_count]);
++req_count;
}
if(up!=-1){
MPI_Bsend(data+(bxs+2)+1, bxs, MPI_DOUBLE, up, 0, my_comm);
MPI_Irecv(data+1, bxs, MPI_DOUBLE, up, 0, my_comm,&req[req_count]);
++req_count;
}
if(down!=-1){
MPI_Bsend(data+(bys)*(bxs+2)+1, bxs, MPI_DOUBLE, down, 0, my_comm);
MPI_Irecv(data+(bys+1)*(bxs+2)+1, bxs, MPI_DOUBLE, down, 0, my_comm,&req[req_count]);
++req_count;
}
//fprintf(stderr, "%d,%d: wait\n", rank,count);
MPI_Waitall(req_count, req,statuses);
//fprintf(stderr, "%d,%d: barrier\n", rank,count);
//MPI_Barrier(my_comm);//not needed
//fprintf(stderr, "%d,%d: done\n", rank,count);
}
MPI_Buffer_detach( &buf_p, &bsize );
//Write
int n_size = 15;
char * buff_out = (char *) malloc(sizeof(char) * (bxs) * (bys)*n_size);
memset(buff_out, ' ', (bxs) * (bys) * n_size * sizeof(char));
fprintf(stderr, "count : %d\n",count);
#pragma omp parallel for private(i)
for(j = 0; j < bys; ++j) {
for(i = 0; i < bxs; ++i){
sprintf(buff_out + (j * bxs + i)*n_size, " %.6e ", data[(j+1) * (bxs+2) + (i+1)]);
}
if (x + 1 == pxs){
buff_out[ (j + 1) * bxs * n_size - 1] = '\n';
}
}
#pragma omp parallel for
for(i = 0; i < (bxs) * (bys) * n_size ; ++i){
if (buff_out[i] == '\0'){
buff_out[i] = ' ';
}
}
MPI_Barrier(my_comm);
std::cout << "Time difference = " << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << "[ms](" <<rank<<")"<< std::endl;
MPI_File fp;
MPI_Datatype filetype,datatype;
MPI_Type_vector(bys, bxs * n_size, bxs * pxs * n_size, MPI_CHAR, &filetype);
MPI_Type_commit(&filetype);
MPI_Type_vector(bys, bxs*n_size, bxs*n_size, MPI_CHAR, &datatype);
MPI_Type_commit(&datatype);
MPI_File_delete(out_str.c_str(), MPI_INFO_NULL);
MPI_File_open(my_comm, out_str.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp);
MPI_File_set_view(fp, (y * bys * bxs * pxs + x*bxs)*n_size*sizeof(char), MPI_CHAR, filetype, "native", MPI_INFO_NULL);
MPI_File_write_all(fp, buff_out, 1, datatype, MPI_STATUS_IGNORE);
MPI_File_close(&fp);
end = std::chrono::steady_clock::now();
if(rank==0){
std::cout << "Time difference = " << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << "[ms]" << std::endl;
}
MPI_Comm_free(&my_comm);
free(data);
}
if(wrank==0){
fprintf(stderr, "Program end\n" );
}
MPI_Finalize();
return 0;
} | 58dba1deaf6cd628f7bbbe7ae0cf9d76362b1165.cu | #include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <string>
#include <limits>
#include <unistd.h>
#include <omp.h>
#include <cmath>
#include <chrono>
#define TIMEOUT_TIME 5
double Unext(double* u, int i, int j, double hxs, double hys, int linesize){
double hyd=1/(hxs*hxs);
double hxd=1/(hys*hys);
return ((u[(i+1)*linesize+j]+u[(i-1)*linesize+j])*hxd+(u[i*linesize+j+1]+u[i*linesize+j-1])*hyd) / (2*(hxd+hyd));
}
double Calc_local_eps(double* u,double* uprev, int bxs,int bys){
int i,j;
double maxe=0;
int linesize=bxs+2;
#pragma omp parallel for private(j) reduction (max: maxe)
for(i=1;i<bys+1;++i){
for(j=1;j<bxs+1;++j){
maxe = std::max(maxe,std::abs(u[i*linesize+j]-uprev[i*linesize+j]));
}
}
return maxe;
}
int main(int argc, char* argv[]) {
int pxs,pys;
int bxs,bys;
std::string out_str("out.txt");
int str_len;
double mineps,leps;
double lx,ly;
double ul,ur,uu, ud;
double uzero;
MPI_Init(&argc, &argv);
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
int i,j, wrank, wnprocs;
MPI_Comm_size(MPI_COMM_WORLD, &wnprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
if(wrank==0){
std::cin>>pxs>>pys;
std::cin>>bxs>>bys;
std::cin>>out_str;
std::cin>>mineps;
std::cin>>lx>>ly;
std::cin>>ul>>ur>>uu>>ud;
std::cin>>uzero;
str_len=out_str.length();
}
MPI_Bcast(&pxs,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&pys,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&bxs,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&bys,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&str_len,1,MPI_INT,0,MPI_COMM_WORLD);
out_str.resize(str_len);
MPI_Bcast((void*)out_str.data(),str_len,MPI_CHAR,0,MPI_COMM_WORLD);
MPI_Bcast(&mineps,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ul,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ur,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&uu,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&ud,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
MPI_Bcast(&uzero,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
if(wrank==1){
fprintf(stderr,"A %d (%d):pxs:%d pys:%d bxs:%d bys:%d lx:%lf ly:%lf ul:%lf ur:%lf uu:%lf ud:%lf uzero:%lf mineps:%lf %s\n",wrank, wnprocs , pxs,pys,bxs,bys,lx,ly,ul,ur,uu,ud,uzero,mineps, out_str.c_str());
}
int n=(bxs+2)*(bys+2);
double hxs=lx/(bxs*pxs);
double hys=ly/(bys*pys);
int linesize=bxs+2;
int color = wrank<pys*pxs ? 0: MPI_UNDEFINED;
MPI_Comm my_comm;
MPI_Comm_split(MPI_COMM_WORLD, color, wrank, &my_comm);
if(color==0){
char *buf, *buf_p;
int bsize;
double* dataswap = (double*)malloc(sizeof(double) * n);
double* data = (double*)malloc(sizeof(double) * n);
int rank, nprocs;
MPI_Comm_size(my_comm, &nprocs);
MPI_Comm_rank(my_comm, &rank);
MPI_Datatype vertvect;
MPI_Type_vector(bys, 1, bxs+2, MPI_DOUBLE, &vertvect);
MPI_Type_commit(&vertvect);
//Init
int x=rank%pxs,y=rank/pxs;
int up,down,left,right;
right = (x+1== pxs)? -1 : y*pxs + (x + 1) % pxs;
left = (x-1==-1) ? -1 : y*pxs + (x - 1) % pxs;
up= (y-1==-1) ? -1 :((y-1)*pxs) + x;
down = (y+1==pys)? -1: ((y+1)*pxs) + x;
#pragma omp parallel for
for(i = 0; i < n; i++){
data[i] = uzero;//can break? cause int init outside?
}
//Init buf
{
int lrs, uds;
MPI_Pack_size(bys,MPI_DOUBLE,my_comm,&lrs);
MPI_Pack_size(bxs,MPI_DOUBLE,my_comm,&uds);
int bufsize = 4*MPI_BSEND_OVERHEAD + lrs + uds;
buf = (char *)malloc( bufsize );
MPI_Buffer_attach(buf, bufsize);
}
//Init margins
if(down==-1){
for(i = (bys+1)*(bxs+2); i < (bys+2)*(bxs+2); ++i){
data[i] = ud;
dataswap[i]=ud;
}
}
if(up==-1){
for(i = 0; i < bxs+2; ++i){
data[i] = uu;
dataswap[i]=uu;
}
}
if(left==-1){
for(i = 0; i < bys+2; ++i){
data[i*(bxs+2)] = ul;
dataswap[i*(bxs+2)] = ul;
}
}
if(right==-1){
for(i = 0; i < bys+2; ++i){
data[(bxs+1)+i*(bxs+2)] = ur;
dataswap[(bxs+1)+i*(bxs+2)] = ur;
}
}
//fprintf(stderr, "%d: %d %d %d %d \n", rank, left , right, up , down);
//main
if(rank==0)
fprintf(stderr, "pxs %d,pys %d,bxs %d,bys %d,mineps %lf,lx %lf,ly %lf,ul %lf,ur %lf, uu %lf , ud %lf , uzero %lf, out_str: %s \n", pxs,pys,bxs,bys,mineps,lx,ly,ul,ur,uu,ud,uzero,out_str.c_str());
int count=0;
fprintf(stderr, "%d %d %d %d\n",left, right, down, up);
while(count<100000){
MPI_Request req[4];
MPI_Status statuses[4];
//main
#pragma omp parallel for private(j)
for(i=1;i<bys+1;++i){
for(j=1;j<bxs+1;++j){
dataswap[i*linesize+j]=Unext(data,i,j,hxs,hys,linesize);
}
}
std::swap(data,dataswap);
count++;
//calc eps
leps=Calc_local_eps(data,dataswap,bxs,bys);
//fprintf(stderr, "%d,%d: after leps\n", rank,count);
//reduce eps
MPI_Allreduce(MPI_IN_PLACE,&leps,1,MPI_DOUBLE,MPI_MAX,my_comm);
if(rank==0){
//fprintf(stderr, "leps: %lf\n",leps);
}
if(leps<mineps){
break;
}
//fprintf(stderr, "%d,%d: after allred\n", rank,count);
//!done
//recv/send from up to down
int req_count=0;
if(left!=-1){
MPI_Bsend((void*)(data+(bxs+2)+1), 1, vertvect, left, 0, my_comm);
MPI_Irecv((void*)(data+(bxs+2)), 1, vertvect,left,0,my_comm,&req[req_count]);
++req_count;
}
if(right!=-1){
MPI_Bsend(data+(bxs+2)*2-2, 1, vertvect, right, 0, my_comm);
MPI_Irecv(data+(bxs+2)*2-1, 1, vertvect, right, 0, my_comm,&req[req_count]);
++req_count;
}
if(up!=-1){
MPI_Bsend(data+(bxs+2)+1, bxs, MPI_DOUBLE, up, 0, my_comm);
MPI_Irecv(data+1, bxs, MPI_DOUBLE, up, 0, my_comm,&req[req_count]);
++req_count;
}
if(down!=-1){
MPI_Bsend(data+(bys)*(bxs+2)+1, bxs, MPI_DOUBLE, down, 0, my_comm);
MPI_Irecv(data+(bys+1)*(bxs+2)+1, bxs, MPI_DOUBLE, down, 0, my_comm,&req[req_count]);
++req_count;
}
//fprintf(stderr, "%d,%d: wait\n", rank,count);
MPI_Waitall(req_count, req,statuses);
//fprintf(stderr, "%d,%d: barrier\n", rank,count);
//MPI_Barrier(my_comm);//not needed
//fprintf(stderr, "%d,%d: done\n", rank,count);
}
MPI_Buffer_detach( &buf_p, &bsize );
//Write
int n_size = 15;
char * buff_out = (char *) malloc(sizeof(char) * (bxs) * (bys)*n_size);
memset(buff_out, ' ', (bxs) * (bys) * n_size * sizeof(char));
fprintf(stderr, "count : %d\n",count);
#pragma omp parallel for private(i)
for(j = 0; j < bys; ++j) {
for(i = 0; i < bxs; ++i){
sprintf(buff_out + (j * bxs + i)*n_size, " %.6e ", data[(j+1) * (bxs+2) + (i+1)]);
}
if (x + 1 == pxs){
buff_out[ (j + 1) * bxs * n_size - 1] = '\n';
}
}
#pragma omp parallel for
for(i = 0; i < (bxs) * (bys) * n_size ; ++i){
if (buff_out[i] == '\0'){
buff_out[i] = ' ';
}
}
MPI_Barrier(my_comm);
std::cout << "Time difference = " << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << "[ms](" <<rank<<")"<< std::endl;
MPI_File fp;
MPI_Datatype filetype,datatype;
MPI_Type_vector(bys, bxs * n_size, bxs * pxs * n_size, MPI_CHAR, &filetype);
MPI_Type_commit(&filetype);
MPI_Type_vector(bys, bxs*n_size, bxs*n_size, MPI_CHAR, &datatype);
MPI_Type_commit(&datatype);
MPI_File_delete(out_str.c_str(), MPI_INFO_NULL);
MPI_File_open(my_comm, out_str.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp);
MPI_File_set_view(fp, (y * bys * bxs * pxs + x*bxs)*n_size*sizeof(char), MPI_CHAR, filetype, "native", MPI_INFO_NULL);
MPI_File_write_all(fp, buff_out, 1, datatype, MPI_STATUS_IGNORE);
MPI_File_close(&fp);
end = std::chrono::steady_clock::now();
if(rank==0){
std::cout << "Time difference = " << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << "[ms]" << std::endl;
}
MPI_Comm_free(&my_comm);
free(data);
}
if(wrank==0){
fprintf(stderr, "Program end\n" );
}
MPI_Finalize();
return 0;
} |
469136bae9e985b9a1b4a69e372bf06f3226d473.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hipfft.h>
#include <time.h>
#include <hip/hip_complex.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/transform_scan.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <float.h>
#include <math.h>
#ifdef _WIN32
#include<windows.h>
#else
#include <pthread.h>
#endif
#include "hip/hip_runtime_api.h"
#include "STOMP.h"
const char * format_str = "%lf";
const char * format_str_n = "%lf\n";
time_t START;
struct thread_args{
unsigned int tid;
thrust::device_vector<DATA_TYPE> *Ta, *Tb;
thrust::device_vector<unsigned long long int> *profile;
thrust::device_vector<unsigned int> *profileIdxs;
unsigned int m;
int exclusion;
int maxJoin;
int start, end;
int numWorkers;
};
struct thread_args targs[NUM_THREADS];
int nDevices;
#ifdef _WIN32
HANDLE threads[NUM_THREADS];
#else
pthread_t threads[NUM_THREADS];
#endif
static const unsigned int WORK_SIZE = 1024;
hipfftHandle plan[NUM_THREADS], plan2[NUM_THREADS], plan3[NUM_THREADS], plan4[NUM_THREADS];
//This macro checks return value of the CUDA runtime call and exits
//the application if the call failed.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void STOMPinit(int size){
for(int i = 0; i < NUM_THREADS; ++i){
printf("HERE\n");
gpuErrchk(hipSetDevice(i % nDevices));
hipfftPlan1d(&plan[i], size * 2, CUFFT_FORWARD_PLAN, 1);
hipfftPlan1d(&plan2[i], size * 2, CUFFT_REVERSE_PLAN, 1);
printf("HERE\n");
}
gpuErrchk(hipSetDevice(0));
}
void STOMPclean(int size){
for(int i = 0; i < NUM_THREADS; ++i){
hipfftDestroy(plan[i]);
hipfftDestroy(plan2[i]);
hipfftPlan1d(&plan[i], size * 2, CUFFT_FORWARD_PLAN, 1);
hipfftPlan1d(&plan2[i], size * 2, CUFFT_REVERSE_PLAN, 1);
}
}
//Reads input time series from file
void readFile(const char* filename, thrust::host_vector<DATA_TYPE>& v){
FILE* f = fopen( filename, "r");
if(f == NULL){
printf("Unable to open %s for reading, please make sure it exists\n", filename);
exit(0);
}
DATA_TYPE num;
while(!feof(f)){
fscanf(f, format_str, &num);
v.push_back(num);
}
v.pop_back();
fclose(f);
}
//This kernel computes a sliding mean with specified window size and a corresponding prefix sum array (A)
__global__ void slidingMean(DATA_TYPE* A, int window, unsigned int size, DATA_TYPE* Means){
const DATA_TYPE coeff = 1.0 / (DATA_TYPE) window;
int a = blockIdx.x * blockDim.x + threadIdx.x;
int b = blockIdx.x * blockDim.x + threadIdx.x + window;
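// A is assumed to hold an inclusive prefix sum of the time series, so A[b] - A[a] is the sum of the
// length-'window' window starting at position a + 1; dividing by the window length gives its mean.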
if(a == 0){
Means[a] = A[window - 1] * coeff;
}
if(a < size - 1){
//printf("%d\n", a + 1);
Means[a + 1] = (A[b] - A[a]) * coeff;
}
}
//This kernel computes a sliding standard deviaiton with specified window size, the corresponding means of each element, and the prefix squared sum at each element
__global__ void slidingStd(DATA_TYPE* squares, unsigned int window, unsigned int size, DATA_TYPE* Means, DATA_TYPE* stds){
const DATA_TYPE coeff = 1 / (DATA_TYPE)window;
int a = blockIdx.x * blockDim.x + threadIdx.x;
int b = blockIdx.x * blockDim.x + threadIdx.x + window;
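// 'squares' is assumed to be an inclusive prefix sum of the squared series; the window variance is then
// E[x^2] - mean^2, and the standard deviation is its square root.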
if(a == 0){
stds[a] = sqrt((squares[window - 1] * coeff) - (Means[a] * Means[a]));
}
else if(b < size + window)
stds[a] = sqrt(((squares[b - 1] - squares[a - 1]) * coeff) - (Means[a] * Means[a]));
}
//This kernel computes the distance profile for a given window position, as long as the index is outside the exclusionZone
__global__ void CalculateDistProfile(DATA_TYPE* QT, DATA_TYPE* D, DATA_TYPE* Means, DATA_TYPE* stds, int m, int start, int n){
const DATA_TYPE Qmean = Means[start];
const DATA_TYPE Qstd = stds[start];
const int exclusionZone = m / 4;
int a = blockIdx.x * blockDim.x + threadIdx.x;
if(a < n && a > start - exclusionZone && a < start + exclusionZone ){
//if(a == start){
D[a] = FLT_MAX;
}else if( a < n){
//D[a] = sqrt(abs(2 * (m - (QT[a] - m * Means[a] * Qmean) / (stds[a] * Qstd))));
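// This is the z-normalized Euclidean distance sqrt(2*m*(1 - P)) rewritten in terms of the sliding dot
// product QT, where P is the Pearson correlation; abs() guards against tiny negative values from rounding.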
D[a] = sqrt(abs(2 * (m - (QT[a] - m * Means[a] * Qmean) / (stds[a] * Qstd))));
}
}
//This kernel divides each element in A by divisor
__global__ void divideBy(double* A, double divisor, unsigned int size){
int a = blockIdx.x * blockDim.x + threadIdx.x;
if(a < size){
A[a] /= divisor;
}
}
//Computes the sliding dot products for a given query using FFT
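// Approach (the usual MASS-style trick): reverse the query so the sliding dot products become a linear
// convolution, then compute IFFT(FFT(reverse(Q)) .* FFT(T)) on zero-padded, double-length transforms.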
__host__ void SlidingDotProducts(const thrust::device_vector<DATA_TYPE>& Q, const thrust::device_vector<DATA_TYPE>& T, thrust::device_vector<DATA_TYPE>& P, hipfftHandle plan, hipfftHandle plan2){
int sz = T.size() * 2;
printf("Starting FFT Forward 1\n");
thrust::device_vector<__CUFFT_TYPE__> Qrc(sz);
gpuErrchk( hipPeekAtLastError() );
thrust::device_vector<DATA_TYPE> Qr(sz);
gpuErrchk( hipPeekAtLastError() );
thrust::reverse_copy(Q.begin(), Q.end(), Qr.begin());
gpuErrchk( hipPeekAtLastError() );
time_t start, now;
time(&start);
CUFFT_FORWARD__(plan, Qr.data().get(), Qrc.data().get());
gpuErrchk( hipPeekAtLastError() );
time(&now);
printf("FFT Forward 1 took %f seconds\n", difftime(start, now));
Qr.clear();
Qr.shrink_to_fit();
thrust::host_vector<__CUFFT_TYPE__> Qrc_h = Qrc;
Qrc.clear();
Qrc.shrink_to_fit();
printf("Allocating Tac\n");
thrust::device_vector<__CUFFT_TYPE__> Tac(sz);
printf("Allocating Ta\n");
thrust::device_vector<DATA_TYPE> Ta(sz);
thrust::copy(T.begin(), T.end(), Ta.begin());
gpuErrchk( hipPeekAtLastError() );
time(&start);
CUFFT_FORWARD__(plan, Ta.data().get(), Tac.data().get());
gpuErrchk( hipPeekAtLastError() );
time(&now);
printf("FFT Forward 2 took %f seconds\n", difftime(start, now));
Ta.clear();
Ta.shrink_to_fit();
Qrc = Qrc_h;
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(Qrc.begin(), Tac.begin())),
thrust::make_zip_iterator(thrust::make_tuple(Qrc.end(), Tac.end())),
multiply());
printf("Finished elementwise multiply\n");
Tac.clear();
Tac.shrink_to_fit();
P.resize(sz);
printf("Starting FFT reverse\n");
time(&start);
CUFFT_REVERSE__(plan2, Qrc.data().get(), P.data().get());
gpuErrchk( hipPeekAtLastError() );
time(&now);
printf("FFT Reverse took %f seconds\n", difftime(start, now));
dim3 grid(sz / WORK_SIZE + 1, 1, 1);
dim3 block(WORK_SIZE, 1, 1);
printf("%d\n",sz / WORK_SIZE);
hipLaunchKernelGGL(( divideBy), dim3(grid),dim3(block), 0, 0, P.data().get(), P.size(), P.size());
gpuErrchk( hipPeekAtLastError() );
}
//Atomically updates the MP/idxs using a single 64-bit integer. We lose a small amount of precision in the output, if we do not do this we are unable
// to atomically update both the matrix profile and the indexes without using a critical section and dedicated locks.
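// mp_entry (presumably defined in STOMP.h) packs the float distance into one 32-bit half and the match
// index into the other, so a single 64-bit CAS updates both the profile value and its index together.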
__device__ inline unsigned long long int MPatomicMin(volatile unsigned long long int* address, double val, unsigned int idx)
{
float fval = (float)val;
mp_entry loc, loctest;
loc.floats[0] = fval;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] > fval){
loctest.ulong = atomicCAS((unsigned long long int*) address, loctest.ulong, loc.ulong);
}
return loctest.ulong;
}
//Updates the global matrix profile based on a block-local, cached version
__device__ inline void UpdateMPGlobal(volatile unsigned long long* profile, volatile mp_entry* localMP, const int chunk, const int offset, const int n, const int factor){
int x = chunk*(blockDim.x/factor)+threadIdx.x;
if(x < n && ((mp_entry*) profile)[x].floats[0] > localMP[threadIdx.x+offset].floats[0])
{
MPatomicMin(&profile[x], localMP[threadIdx.x+offset].floats[0], localMP[threadIdx.x+offset].ints[1]);
}
}
//This version computes the matrix profile under the assumption that the input time series is actually a concatenation of some number of other time series with length (instanceLength)
//Ignores overlapping regions between independant time series when concatenated
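// Only pairs drawn from different instances are compared, and windows that would straddle an instance
// boundary (x % instanceLength + m > instanceLength) are skipped on both sides.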
__global__ void WavefrontUpdateSelfJoinWithExclusion(const double* QT, const double* Ta, const double* Tb, const double* means, const double* stds, unsigned long long int* profile, unsigned int m, unsigned int n, int startPos, int endPos, int numDevices, int instanceLength){
__shared__ volatile mp_entry localMPMain[WORK_SIZE * 2];
__shared__ volatile mp_entry localMPOther[WORK_SIZE];
__shared__ volatile bool updated[3];
int a = ((blockIdx.x * numDevices) + startPos) * blockDim.x + threadIdx.x;
//const int b = ((blockIdx.x * numDevices) + startPos + 1) * blockDim.x;
int exclusion = m / 4;
double workspace;
int localX = threadIdx.x + 1;
int localY = 1;
int chunkIdxMain = a / blockDim.x;
int chunkIdxOther = 0;
int mainStart = blockDim.x * chunkIdxMain;
int otherStart = 0;
if(a < n){
workspace = QT[a];
}else{
workspace = -1;
}
//Initialize Shared Data
if(mainStart+threadIdx.x < n){
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}else{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n){
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}else{
localMPMain[blockDim.x + threadIdx.x].floats[0] = FLT_MAX;
localMPMain[blockDim.x + threadIdx.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n){
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}else{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x == 0)
{
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
int x = a + 1;
int y = 1;
while(mainStart < n && otherStart < n)
{
__syncthreads();
//Update to the end of the current chunk
while(x < n && y < n && localY < blockDim.x)
{
workspace = workspace - Ta[x - 1] * Tb[y - 1] + Ta[x + m - 1] * Tb[ y + m - 1];
if(x / instanceLength != y / instanceLength && x % instanceLength + m <= instanceLength && y % instanceLength + m <= instanceLength )
{
//Compute the next distance value
double dist = sqrt(abs(2 * (m - (workspace - m * means[x] * means[y]) / (stds[x] * stds[y]))));
//Check cache to see if we even need to try to update
if(localMPMain[localX].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPMain[localX], dist, y);
if(localX < blockDim.x && !updated[0]){
updated[0] = true;
}else if(!updated[1]){
updated[1] = true;
}
}
//Check cache to see if we even need to try to update
if(localMPOther[localY].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPOther[localY], dist, x);
if(!updated[2]){
updated[2] = true;
}
}
}
++x;
++y;
++localX;
++localY;
}
__syncthreads();
//If we updated any values in the cached MP, try to push them to the global "master" MP
if(updated[0]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain, 0,n,1);
}
if(updated[1]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain + 1, blockDim.x,n,1);
}
if(updated[2]){
UpdateMPGlobal(profile, localMPOther, chunkIdxOther, 0,n,1);
}
__syncthreads();
if(threadIdx.x == 0){
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
mainStart += blockDim.x;
otherStart += blockDim.x;
//Update local cache to point to the next chunk of the MP
if(mainStart+threadIdx.x < n)
{
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}
else
{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n)
{
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}
else
{
localMPMain[threadIdx.x + blockDim.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x + blockDim.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n)
{
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}
else
{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
localY = 0;
localX = threadIdx.x;
chunkIdxMain++;
chunkIdxOther++;
}
}
//Computes the matrix profile given the sliding dot products for the first query and the precomputed data statisics
__global__ void WavefrontUpdateSelfJoin(double* QT, double* Ta, double* Tb, double* means, double* stds, volatile unsigned long long int* profile, unsigned int m, unsigned int n, int startPos, int endPos, int numDevices){
__shared__ volatile mp_entry localMPMain[WORK_SIZE * 2];
__shared__ volatile mp_entry localMPOther[WORK_SIZE];
__shared__ volatile bool updated[3];
int a = ((blockIdx.x * numDevices) + startPos) * blockDim.x + threadIdx.x;
//const int b = ((blockIdx.x * numDevices) + startPos + 1) * blockDim.x;
int exclusion = m / 4;
double workspace;
int localX = threadIdx.x + 1;
int localY = 1;
int chunkIdxMain = a / blockDim.x;
int chunkIdxOther = 0;
int mainStart = blockDim.x * chunkIdxMain;
int otherStart = 0;
if(a < n){
workspace = QT[a];
}else{
workspace = -1;
}
//Initialize Shared Data
if(mainStart+threadIdx.x < n){
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}else{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n){
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}else{
localMPMain[blockDim.x + threadIdx.x].floats[0] = FLT_MAX;
localMPMain[blockDim.x + threadIdx.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n){
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}else{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x == 0)
{
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
int x = a + 1;
int y = 1;
while(mainStart < n && otherStart < n)
{
__syncthreads();
//Update to the end of the current chunk
while(x < n && y < n && localY < blockDim.x)
{
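// STOMP recurrence along a diagonal: QT(x, y) = QT(x-1, y-1) - Ta[x-1]*Tb[y-1] + Ta[x+m-1]*Tb[y+m-1],
// so each thread walks one diagonal of the distance matrix while updating the running dot product.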
workspace = workspace - Ta[x - 1] * Tb[y - 1] + Ta[x + m - 1] * Tb[ y + m - 1];
if(!(x > y - exclusion && x < y + exclusion))
{
//Compute the next distance value
double dist = sqrt(abs(2 * (m - (workspace - m * means[x] * means[y]) / (stds[x] * stds[y]))));
//Check cache to see if we even need to try to update
if(localMPMain[localX].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPMain[localX], dist, y);
if(localX < blockDim.x && !updated[0]){
updated[0] = true;
}else if(!updated[1]){
updated[1] = true;
}
}
//Check cache to see if we even need to try to update
if(localMPOther[localY].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPOther[localY], dist, x);
if(!updated[2]){
updated[2] = true;
}
}
}
++x;
++y;
++localX;
++localY;
}
__syncthreads();
//If we updated any values in the cached MP, try to push them to the global "master" MP
if(updated[0]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain, 0,n, 1);
}
if(updated[1]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain + 1, blockDim.x,n, 1);
}
if(updated[2]){
UpdateMPGlobal(profile, localMPOther, chunkIdxOther, 0,n, 1);
}
__syncthreads();
if(threadIdx.x == 0){
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
mainStart += blockDim.x;
otherStart += blockDim.x;
//Update local cache to point to the next chunk of the MP
if(mainStart+threadIdx.x < n)
{
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}
else
{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n)
{
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}
else
{
localMPMain[threadIdx.x + blockDim.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x + blockDim.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n)
{
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}
else
{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
localY = 0;
localX = threadIdx.x;
chunkIdxMain++;
chunkIdxOther++;
}
}
//Computes the matrix profile given the sliding dot products for the first query and the precomputed data statisics
//This version requires at LEAST 92KB shared memory per SM for maximum occupancy (currently this requirement is satisfied by compute capabilites 3.7, 5.2, and 6.1
//If your GPU does not meet this requirement, try this algorithm and WafefrontUpdateSelfJoin and pick the one that gives better peformance for your GPU
//The best performance for this version is achieved when using a block size of 1024 and the lowest power-of-two factor possible for the shared memory available on your GPU, for the Tesla K80 this value is 16.
//Minimum shared memory usage is 5*sizeof(double)*WORK_SIZE + X. X varies with the value of factor. X = 10*sizeof(double)*WORK_SIZE / factor
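// For example (a rough check, assuming 8-byte doubles and an 8-byte mp_entry): with WORK_SIZE = 1024 and
// factor = 16 this is 5*8*1024 + 10*8*1024/16 = 46080 bytes per block, so two resident 1024-thread blocks
// per SM need roughly the 92KB quoted above.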
__global__ void WavefrontUpdateSelfJoinMaxSharedMem(const double* QT, const double* Ta, const double* Tb, const double* means, const double* stds, unsigned long long int* profile, unsigned int m, unsigned int n, int startPos, int endPos, int numDevices){
//Factor and threads per block must both be powers of two where: factor <= threads per block
//Use the smallest power of 2 possible for your GPU
const int factor = 16;
__shared__ volatile mp_entry localMPMain[WORK_SIZE + WORK_SIZE / factor];
__shared__ volatile mp_entry localMPOther[WORK_SIZE / factor];
__shared__ double A_low[WORK_SIZE + WORK_SIZE / factor];
__shared__ double A_high[WORK_SIZE + WORK_SIZE / factor];
__shared__ double mu_x[WORK_SIZE + WORK_SIZE / factor];
__shared__ double mu_y[WORK_SIZE / factor];
__shared__ double sigma_x[WORK_SIZE + WORK_SIZE / factor];
__shared__ double sigma_y[WORK_SIZE / factor];
__shared__ double B_high[WORK_SIZE / factor];
__shared__ double B_low[WORK_SIZE / factor];
__shared__ volatile bool updated[3];
int a = ((blockIdx.x * numDevices) + startPos) * blockDim.x + threadIdx.x;
//const int b = ((blockIdx.x * numDevices) + startPos + 1) * blockDim.x;
int exclusion = m / 4;
double workspace;
int localX = threadIdx.x + 1;
int localY = 1;
int chunkIdxMain = (a / blockDim.x) *factor;
int chunkIdxOther = 0;
int mainStart = (blockDim.x / factor) * chunkIdxMain;
int otherStart = 0;
if(a < n){
workspace = QT[a];
}else{
workspace = -1;
}
//Initialize Shared Data
if(mainStart+threadIdx.x < n){
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}else{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && mainStart+threadIdx.x+blockDim.x < n){
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}else if( threadIdx.x < blockDim.x / factor){
localMPMain[blockDim.x + threadIdx.x].floats[0] = FLT_MAX;
localMPMain[blockDim.x + threadIdx.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && otherStart+threadIdx.x < n){
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}else if(threadIdx.x < blockDim.x / factor){
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x == 0)
{
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
int x = a + 1;
int y = 1;
if(x - 1 < n){
A_low[threadIdx.x] = Ta[x - 1];
}
if(x + m - 1 < n + m - 1){
A_high[threadIdx.x] = Ta[x + m - 1];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x - 1 < n + m - 1){
A_low[threadIdx.x + blockDim.x] = Ta[x + blockDim.x - 1];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x - 1 + m < n + m - 1){
A_high[threadIdx.x + blockDim.x] = Ta[x + blockDim.x - 1 + m];
}
if(a < n){
sigma_x[threadIdx.x] = stds[a];
mu_x[threadIdx.x] = means[a];
}
if(threadIdx.x < blockDim.x / factor && a + blockDim.x < n){
sigma_x[threadIdx.x + blockDim.x] = stds[a + blockDim.x];
mu_x[threadIdx.x + blockDim.x] = means[a + blockDim.x];
}
if(threadIdx.x < blockDim.x / factor){
B_low[threadIdx.x] = Tb[threadIdx.x];
B_high[threadIdx.x] = Tb[threadIdx.x + m];
sigma_y[threadIdx.x] = stds[threadIdx.x];
mu_y[threadIdx.x] = means[threadIdx.x];
}
int relativeX = threadIdx.x;
int relativeY = 0;
while(mainStart < n && otherStart < n)
{
__syncthreads();
//Update to the end of the current chunk
while(x < n && y < n && localY < blockDim.x / factor)
{
//workspace = workspace - Ta[x - 1] * Tb[y - 1] + Ta[x + m - 1] * Tb[ y + m - 1];
workspace = workspace - A_low[relativeX]* B_low[relativeY] + A_high[relativeX] * B_high[relativeY];
//workspace = workspace - Ta[x - 1] * B_low[relativeY] + Ta[x + m - 1] * B_high[relativeY];
//workspace = workspace - A_low[relativeX] * B_low[relativeY] + Ta[x + m - 1] * B_high[relativeY];
//workspace = workspace - Ta[x - 1] * B_low[relativeY] + A_high[relativeX] * B_high[relativeY];
if(!(x > y - exclusion && x < y + exclusion))
{
//Compute the next distance value
//double dist = sqrt(abs(2 * (m - (workspace - m * means[x] * means[y]) / (stds[x] * stds[y]))));
double dist = sqrt(abs(2 * (m - (workspace - m * mu_x[localX] * mu_y[localY]) / (sigma_x[localX] * sigma_y[localY]))));
//Check cache to see if we even need to try to update
if(localMPMain[localX].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPMain[localX], dist, y);
if(localX < blockDim.x && !updated[0]){
updated[0] = true;
}else if(!updated[1]){
updated[1] = true;
}
}
//Check cache to see if we even need to try to update
if(localMPOther[localY].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPOther[localY], dist, x);
if(!updated[2]){
updated[2] = true;
}
}
}
++x;
++y;
++localX;
++localY;
++relativeX;
++relativeY;
}
__syncthreads();
//If we updated any values in the cached MP, try to push them to the global "master" MP
if(updated[0]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain, 0,n, factor);
}
if(updated[1]){
if(threadIdx.x < blockDim.x / factor){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain + factor, blockDim.x, n, factor);
}
}
if(updated[2]){
if(threadIdx.x < blockDim.x / factor){
UpdateMPGlobal(profile, localMPOther, chunkIdxOther, 0,n, factor);
}
}
__syncthreads();
if(threadIdx.x == 0){
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
mainStart += blockDim.x / factor;
otherStart += blockDim.x / factor;
//Update local cache to point to the next chunk of the MP
if(mainStart+threadIdx.x < n)
{
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}
else
{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && mainStart+threadIdx.x+blockDim.x < n)
{
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}
else if( threadIdx.x < blockDim.x / factor)
{
localMPMain[threadIdx.x + blockDim.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x + blockDim.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && otherStart+threadIdx.x < n)
{
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}
else if( threadIdx.x < blockDim.x / factor)
{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(x - 1 < n + m - 1){
A_low[threadIdx.x] = Ta[x - 1];
}
if(threadIdx.x < blockDim.x / factor && x - 1 + blockDim.x < n +m - 1){
A_low[threadIdx.x + blockDim.x] = Ta[x + blockDim.x - 1];
}
if(x + m - 1 < n + m - 1){
A_high[threadIdx.x] = Ta[x + m - 1];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x + m - 1 < n + m - 1){
A_high[threadIdx.x + blockDim.x] = Ta[x + blockDim.x + m - 1];
}
if(threadIdx.x < blockDim.x / factor && y + threadIdx.x - 1 < n + m - 1){
B_low[threadIdx.x] = Tb[y + threadIdx.x - 1];
}
if(threadIdx.x < blockDim.x / factor && y + threadIdx.x - 1 + m < n + m - 1){
B_high[threadIdx.x] = Tb[y + threadIdx.x + m - 1];
}
if(x < n){
sigma_x[threadIdx.x] = stds[x];
mu_x[threadIdx.x] = means[x];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x < n){
sigma_x[threadIdx.x + blockDim.x] = stds[x + blockDim.x];
mu_x[threadIdx.x + blockDim.x] = means[x + blockDim.x];
}
if(threadIdx.x < blockDim.x / factor && y + threadIdx.x < n){
sigma_y[threadIdx.x] = stds[y + threadIdx.x];
mu_y[threadIdx.x] = means[y + threadIdx.x];
}
relativeY = 0;
localY = 0;
localX = threadIdx.x;
relativeX = threadIdx.x;
chunkIdxMain++;
chunkIdxOther++;
}
}
//Performs STOMP algorithm
#ifdef _WIN32
DWORD WINAPI doThreadSTOMP(LPVOID argsp){
#else
void* doThreadSTOMP(void* argsp) {
#endif
thread_args* args = (thread_args*) argsp;
int tid = args->tid;
gpuErrchk(hipSetDevice(tid % nDevices));
thrust::device_vector<DATA_TYPE>* Ta = args -> Ta;
thrust::device_vector<DATA_TYPE>* Tb = args -> Tb;
thrust::device_vector<unsigned long long int>* profile = args -> profile;
thrust::device_vector<unsigned int>* profileIdxs = args -> profileIdxs;
int numWorkers = args ->numWorkers;
int m = args -> m;
unsigned int start = args -> start;
unsigned int end = args -> end;
unsigned int n = Ta ->size() - m + 1;
unsigned int n2 = Tb -> size() - m + 1;
unsigned int sz = Ta -> size();
thrust::plus<DATA_TYPE> op1;
square op2;
printf("allocating grids\n");
dim3 grid(Ta -> size() / WORK_SIZE + 1, 1, 1);
dim3 grid3(n / WORK_SIZE + 1, 1, 1);
dim3 block(WORK_SIZE, 1, 1);
double oldTime = 0;
thrust::device_vector<DATA_TYPE> Qb(m);
gpuErrchk( hipPeekAtLastError() );
thrust::copy(thrust::hip::par,Tb -> begin(), Tb -> begin() + m, Qb.begin());
gpuErrchk( hipPeekAtLastError() );
thrust::device_vector<DATA_TYPE> QT;
SlidingDotProducts(Qb, *Ta, QT, plan[tid], plan2[tid]);
thrust::device_vector<DATA_TYPE> QTtrunc(n);
gpuErrchk( hipPeekAtLastError() );
thrust::copy(thrust::hip::par,QT.begin() + m - 1, QT.begin() + m + n - 1, QTtrunc.begin());
gpuErrchk( hipPeekAtLastError() );
QT.clear();
QT.shrink_to_fit();
thrust::device_vector<DATA_TYPE> Means(n), stds(n), squares(Ta -> size()), sums(Ta -> size());
thrust::inclusive_scan(Ta -> begin(),Ta -> end(),sums.begin(), op1);
thrust::transform_inclusive_scan(Ta -> begin(), Ta -> end(), squares.begin(), op2,op1);
hipLaunchKernelGGL(( slidingMean), dim3(grid), dim3(block), 0, 0, sums.data().get(),m, n, Means.data().get());
gpuErrchk( hipPeekAtLastError() );
hipLaunchKernelGGL(( slidingStd), dim3(grid), dim3(block), 0, 0, squares.data().get(), m, n, Means.data().get(), stds.data().get());
gpuErrchk( hipPeekAtLastError() );
sums.clear();
squares.clear();
sums.shrink_to_fit();
squares.shrink_to_fit();
printf("allocating DP");
thrust::device_vector<DATA_TYPE> D;
D.resize(n,_MAX_VAL_);
hipLaunchKernelGGL(( CalculateDistProfile), dim3(grid), dim3(block), 0, 0, QTtrunc.data().get(), D.data().get(), Means.data().get(), stds.data().get(), m, 0, n);
gpuErrchk( hipPeekAtLastError() );
//Initialize the indexes to the starting position
profileIdxs -> resize(n,1);
profile->resize(n, 0);
thrust::device_vector<double>::iterator it = thrust::min_element(D.begin(),D.end());
unsigned int pos = it - D.begin();
double val = *it;
//cout << pos << " " << val;
(*profileIdxs)[0] = pos;
D[0] = *it;
thrust::transform(D.begin(), D.end(), profileIdxs->begin(), profile->begin(), MPIDXCombine());
D.clear();
D.shrink_to_fit();
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
time_t start2, now2;
time_t lastLogged;
time(&start2);
time(&lastLogged);
#ifdef USE_BEST_VERSION
hipLaunchKernelGGL(( WavefrontUpdateSelfJoinMaxSharedMem), dim3(dim3(ceil(numWorkers / (double) WORK_SIZE), 1, 1)),dim3(dim3(WORK_SIZE, 1,1)), 0, 0, QTtrunc.data().get(), Ta -> data().get(), Tb -> data().get(), Means.data().get(), stds.data().get(), profile -> data().get(), m, n, start, end, NUM_THREADS);
#else
hipLaunchKernelGGL(( WavefrontUpdateSelfJoin), dim3(dim3(ceil(numWorkers / (double) WORK_SIZE), 1, 1)),dim3(dim3(WORK_SIZE, 1,1)), 0, 0, QTtrunc.data().get(), Ta -> data().get(), Tb -> data().get(), Means.data().get(), stds.data().get(), profile -> data().get(), m, n, start, end, NUM_THREADS);
#endif
gpuErrchk( hipPeekAtLastError() );
hipDeviceSynchronize();
//std::cout << thrust::reduce(counts.begin(), counts.end(), 0, thrust::plus<unsigned long long>()) << std::endl;
time_t now3;
time(&now3);
printf("Finished thread %d over all iterations in %lf seconds\n", tid, difftime(now3, start2) + oldTime);
//pthread_exit(0);
return 0;
}
//Allocates threads on a CPU to distribute work to each specified device
__host__ void STOMP(thrust::host_vector<DATA_TYPE>& Ta, unsigned int m,
thrust::host_vector<float>& profile_h, thrust::host_vector<unsigned int>& profileIdxs_h){
gpuErrchk(hipGetDeviceCount(&nDevices));
STOMPinit(Ta.size());
thrust::device_vector<DATA_TYPE>* Ta_d = new thrust::device_vector<DATA_TYPE>[nDevices];
thrust::device_vector<unsigned long long int>* Profs[NUM_THREADS];
thrust::device_vector<unsigned int>* ProfsIdxs[NUM_THREADS];
for(int i = 0; i < nDevices; ++i){
gpuErrchk(hipSetDevice(i));
Ta_d[i] = Ta;
}
for(int i = 0; i < NUM_THREADS; ++i){
gpuErrchk(hipSetDevice(i % nDevices));
Profs[i] = new thrust::device_vector<unsigned long long int>();
ProfsIdxs[i] = new thrust::device_vector<unsigned int>();
}
gpuErrchk(hipSetDevice(0));
unsigned int n = Ta.size() - m + 1;
unsigned int lastend=0;
for(unsigned int i = 0; i < NUM_THREADS; ++i ){
lastend += ceil(n / (double) NUM_THREADS);
if(lastend > n){
lastend = n;
}
int workers = ceil(n / (double) NUM_THREADS);
std::cout << workers<< std::endl;
int tid = i;
targs[tid].Ta = &Ta_d[i % nDevices];
targs[tid].Tb = &Ta_d[i % nDevices];
targs[tid].tid = tid;
targs[tid].profile = Profs[tid];
targs[tid].profileIdxs = ProfsIdxs[tid];
targs[tid].m = m;
targs[tid].start = i;
targs[tid].numWorkers = ceil(n / (double) NUM_THREADS);
//lastend = n-floor(n*sqrt(double(NUM_THREADS-i-1)/double(NUM_THREADS-i)));
printf("val:%lf\n", sqrt(double(NUM_THREADS-i-1)/double(NUM_THREADS-i)));
targs[tid].end = n;
targs[tid].exclusion = m / 4;
targs[tid].maxJoin = 0;
printf("Launching thread %d, for start = %d, to end = %d\n", tid, targs[tid].start, targs[tid].end);
#ifdef _WIN32
threads[tid] = CreateThread(NULL, 0, doThreadSTOMP, (void*)&targs[tid], 0, NULL);
#else
int rc = pthread_create(&threads[tid], NULL, doThreadSTOMP, (void*)&targs[tid]);
#endif // _WIN32
++tid;
}
for(int x = 0; x < NUM_THREADS; x++)
#ifdef _WIN32
WaitForMultipleObjects(NUM_THREADS, threads, TRUE, INFINITE);
#else
pthread_join(threads[x], NULL);
#endif
gpuErrchk(hipSetDevice(0));
thrust::device_vector<float> profile(Ta.size() - m + 1, FLT_MAX);
thrust::device_vector<unsigned int> profileIdxs(Ta.size() - m + 1, 0);
//Move all pieces back to the same GPU to aggregate
//TODO:(This can be split into steps in the case we are using a massive number of GPUs)
for(int i = 0; i < NUM_THREADS; ++i)
{
if(i % nDevices != 0)
{
gpuErrchk(hipSetDevice(i % nDevices));
thrust::host_vector<unsigned long long int> temp = *Profs[i];
delete Profs[i];
delete ProfsIdxs[i];
gpuErrchk(hipSetDevice(0));
Profs[i] = new thrust::device_vector<unsigned long long int>(temp);
gpuErrchk( hipPeekAtLastError() );
ProfsIdxs[i] = new thrust::device_vector<unsigned int>();
gpuErrchk( hipPeekAtLastError() );
}
}
//Compute final distance profile (Aggregate what each thread produced)
for(int i = 0; i < NUM_THREADS; ++i){
int curstart=0;
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(profile.begin(), profileIdxs.begin(), Profs[i] -> begin())), thrust::make_zip_iterator(thrust::make_tuple(profile.end(), profileIdxs.end(), Profs[i] -> end())), minWithIndex2());
gpuErrchk( hipPeekAtLastError() );
}
for(int i = 0; i < NUM_THREADS; ++i){
delete Profs[i];
delete ProfsIdxs[i];
}
delete [] Ta_d;
profile_h = profile;
profileIdxs_h = profileIdxs;
}
int main(int argc, char** argv) {
int window_size = atoi(argv[1]);
thrust::host_vector<DATA_TYPE> Th;
readFile(argv[2], Th);
//thrust::device_vector<DATA_TYPE> T;
//T = Th;
int size = Th.size();
thrust::host_vector<float> profile;
thrust::host_vector<unsigned int> profIdxs;
printf("Starting STOMP\n");
time_t now;
time(&START);
STOMP(Th,window_size,profile, profIdxs);
time(&now);
printf("Finished STOMP on %u data points in %f seconds.\n", size, difftime(now, START));
printf("Now writing result to files\n");
FILE* f1 = fopen( argv[3], "w");
FILE* f2 = fopen( argv[4], "w");
for(int i = 0; i < profIdxs.size(); ++i){
fprintf(f1, format_str_n, profile[i]);
fprintf(f2, "%u\n", profIdxs[i] + 1);
}
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipDeviceReset());
fclose(f1);
fclose(f2);
printf("Done\n");
return 0;
}
| 469136bae9e985b9a1b4a69e372bf06f3226d473.cu | #include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#include <time.h>
#include <cuComplex.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/transform_scan.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <float.h>
#include <math.h>
#ifdef _WIN32
#include<windows.h>
#else
#include <pthread.h>
#endif
#include "cuda_profiler_api.h"
#include "STOMP.h"
const char * format_str = "%lf";
const char * format_str_n = "%lf\n";
time_t START;
struct thread_args{
unsigned int tid;
thrust::device_vector<DATA_TYPE> *Ta, *Tb;
thrust::device_vector<unsigned long long int> *profile;
thrust::device_vector<unsigned int> *profileIdxs;
unsigned int m;
int exclusion;
int maxJoin;
int start, end;
int numWorkers;
};
struct thread_args targs[NUM_THREADS];
int nDevices;
#ifdef _WIN32
HANDLE threads[NUM_THREADS];
#else
pthread_t threads[NUM_THREADS];
#endif
static const unsigned int WORK_SIZE = 1024;
cufftHandle plan[NUM_THREADS], plan2[NUM_THREADS], plan3[NUM_THREADS], plan4[NUM_THREADS];
//This macro checks return value of the CUDA runtime call and exits
//the application if the call failed.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void STOMPinit(int size){
for(int i = 0; i < NUM_THREADS; ++i){
printf("HERE\n");
gpuErrchk(cudaSetDevice(i % nDevices));
cufftPlan1d(&plan[i], size * 2, CUFFT_FORWARD_PLAN, 1);
cufftPlan1d(&plan2[i], size * 2, CUFFT_REVERSE_PLAN, 1);
printf("HERE\n");
}
gpuErrchk(cudaSetDevice(0));
}
void STOMPclean(int size){
for(int i = 0; i < NUM_THREADS; ++i){
cufftDestroy(plan[i]);
cufftDestroy(plan2[i]);
cufftPlan1d(&plan[i], size * 2, CUFFT_FORWARD_PLAN, 1);
cufftPlan1d(&plan2[i], size * 2, CUFFT_REVERSE_PLAN, 1);
}
}
//Reads input time series from file
void readFile(const char* filename, thrust::host_vector<DATA_TYPE>& v){
FILE* f = fopen( filename, "r");
if(f == NULL){
printf("Unable to open %s for reading, please make sure it exists\n", filename);
exit(0);
}
DATA_TYPE num;
while(!feof(f)){
fscanf(f, format_str, &num);
v.push_back(num);
}
v.pop_back();
fclose(f);
}
//This kernel computes a sliding mean with the specified window size from the corresponding prefix sum array (A)
__global__ void slidingMean(DATA_TYPE* A, int window, unsigned int size, DATA_TYPE* Means){
const DATA_TYPE coeff = 1.0 / (DATA_TYPE) window;
int a = blockIdx.x * blockDim.x + threadIdx.x;
int b = blockIdx.x * blockDim.x + threadIdx.x + window;
if(a == 0){
Means[a] = A[window - 1] * coeff;
}
if(a < size - 1){
//printf("%d\n", a + 1);
Means[a + 1] = (A[b] - A[a]) * coeff;
}
}
//This kernel computes a sliding standard deviation with the specified window size, given the corresponding means of each element and the prefix sum of squares at each element
__global__ void slidingStd(DATA_TYPE* squares, unsigned int window, unsigned int size, DATA_TYPE* Means, DATA_TYPE* stds){
const DATA_TYPE coeff = 1 / (DATA_TYPE)window;
int a = blockIdx.x * blockDim.x + threadIdx.x;
int b = blockIdx.x * blockDim.x + threadIdx.x + window;
if(a == 0){
stds[a] = sqrt((squares[window - 1] * coeff) - (Means[a] * Means[a]));
}
else if(b < size + window)
stds[a] = sqrt(((squares[b - 1] - squares[a - 1]) * coeff) - (Means[a] * Means[a]));
}
//This kernel computes the distance profile for a given window position, as long as the index is outside the exclusionZone
__global__ void CalculateDistProfile(DATA_TYPE* QT, DATA_TYPE* D, DATA_TYPE* Means, DATA_TYPE* stds, int m, int start, int n){
const DATA_TYPE Qmean = Means[start];
const DATA_TYPE Qstd = stds[start];
const int exclusionZone = m / 4;
int a = blockIdx.x * blockDim.x + threadIdx.x;
if(a < n && a > start - exclusionZone && a < start + exclusionZone ){
//if(a == start){
D[a] = FLT_MAX;
}else if( a < n){
//D[a] = sqrt(abs(2 * (m - (QT[a] - m * Means[a] * Qmean) / (stds[a] * Qstd))));
D[a] = sqrt(abs(2 * (m - (QT[a] - m * Means[a] * Qmean) / (stds[a] * Qstd))));
}
}
//This kernel divides each element in A by divisor
__global__ void divideBy(double* A, double divisor, unsigned int size){
int a = blockIdx.x * blockDim.x + threadIdx.x;
if(a < size){
A[a] /= divisor;
}
}
//Computes the sliding dot products for a given query using FFT
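//The products are obtained with the usual FFT trick: reverse the query, forward-transform both the reversed query
//and the full series, multiply pointwise, and inverse-transform. Entries m-1 .. m+n-2 of the result are then the
//sliding dot products QT[i] = sum_j Q[j]*T[i+j], which is exactly the range the caller later copies into QTtrunc.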
__host__ void SlidingDotProducts(const thrust::device_vector<DATA_TYPE>& Q, const thrust::device_vector<DATA_TYPE>& T, thrust::device_vector<DATA_TYPE>& P, cufftHandle plan, cufftHandle plan2){
int sz = T.size() * 2;
printf("Starting FFT Forward 1\n");
thrust::device_vector<__CUFFT_TYPE__> Qrc(sz);
gpuErrchk( cudaPeekAtLastError() );
thrust::device_vector<DATA_TYPE> Qr(sz);
gpuErrchk( cudaPeekAtLastError() );
thrust::reverse_copy(Q.begin(), Q.end(), Qr.begin());
gpuErrchk( cudaPeekAtLastError() );
time_t start, now;
time(&start);
CUFFT_FORWARD__(plan, Qr.data().get(), Qrc.data().get());
gpuErrchk( cudaPeekAtLastError() );
time(&now);
printf("FFT Forward 1 took %f seconds\n", difftime(start, now));
Qr.clear();
Qr.shrink_to_fit();
thrust::host_vector<__CUFFT_TYPE__> Qrc_h = Qrc;
Qrc.clear();
Qrc.shrink_to_fit();
printf("Allocating Tac\n");
thrust::device_vector<__CUFFT_TYPE__> Tac(sz);
printf("Allocating Ta\n");
thrust::device_vector<DATA_TYPE> Ta(sz);
thrust::copy(T.begin(), T.end(), Ta.begin());
gpuErrchk( cudaPeekAtLastError() );
time(&start);
CUFFT_FORWARD__(plan, Ta.data().get(), Tac.data().get());
gpuErrchk( cudaPeekAtLastError() );
time(&now);
printf("FFT Forward 2 took %f seconds\n", difftime(start, now));
Ta.clear();
Ta.shrink_to_fit();
Qrc = Qrc_h;
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(Qrc.begin(), Tac.begin())),
thrust::make_zip_iterator(thrust::make_tuple(Qrc.end(), Tac.end())),
multiply());
printf("Finished elementwise multiply\n");
Tac.clear();
Tac.shrink_to_fit();
P.resize(sz);
printf("Starting FFT reverse\n");
time(&start);
CUFFT_REVERSE__(plan2, Qrc.data().get(), P.data().get());
gpuErrchk( cudaPeekAtLastError() );
time(&now);
printf("FFT Reverse took %f seconds\n", difftime(start, now));
dim3 grid(sz / WORK_SIZE + 1, 1, 1);
dim3 block(WORK_SIZE, 1, 1);
printf("%d\n",sz / WORK_SIZE);
divideBy<<<grid,block>>>(P.data().get(), P.size(), P.size());
gpuErrchk( cudaPeekAtLastError() );
}
//Atomically updates the MP/idxs using a single 64-bit integer. We lose a small amount of precision in the output; if we did not do this, we would be unable
// to atomically update both the matrix profile and the indexes without using a critical section and dedicated locks.
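//The packing relies on the 8-byte mp_entry union declared in STOMP.h; its layout is only sketched here from the way it is used below:
//  union mp_entry {
//    float floats[2];              // floats[0]: the distance, truncated to single precision
//    unsigned int ints[2];         // ints[1]:   the index of the matching subsequence
//    unsigned long long int ulong; // both halves viewed as one 64-bit word for atomicCAS
//  };
//Keeping the pair in a single word lets one atomicCAS loop update the distance and its index consistently.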
__device__ inline unsigned long long int MPatomicMin(volatile unsigned long long int* address, double val, unsigned int idx)
{
float fval = (float)val;
mp_entry loc, loctest;
loc.floats[0] = fval;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] > fval){
loctest.ulong = atomicCAS((unsigned long long int*) address, loctest.ulong, loc.ulong);
}
return loctest.ulong;
}
//Updates the global matrix profile based on a block-local, cached version
__device__ inline void UpdateMPGlobal(volatile unsigned long long* profile, volatile mp_entry* localMP, const int chunk, const int offset, const int n, const int factor){
int x = chunk*(blockDim.x/factor)+threadIdx.x;
if(x < n && ((mp_entry*) profile)[x].floats[0] > localMP[threadIdx.x+offset].floats[0])
{
MPatomicMin(&profile[x], localMP[threadIdx.x+offset].floats[0], localMP[threadIdx.x+offset].ints[1]);
}
}
//This version computes the matrix profile under the assumption that the input time series is actually a concatenation of some number of other time series with length (instanceLength)
//Ignores overlapping regions between independent time series when concatenated
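//For example, with instanceLength = 500 and m = 100: a window starting at x = 450 crosses the boundary between the
//first and second concatenated series (450 % 500 + 100 > 500), so the check below skips it, and any pair with
//x / instanceLength == y / instanceLength is skipped because both windows come from the same underlying series.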
__global__ void WavefrontUpdateSelfJoinWithExclusion(const double* QT, const double* Ta, const double* Tb, const double* means, const double* stds, unsigned long long int* profile, unsigned int m, unsigned int n, int startPos, int endPos, int numDevices, int instanceLength){
__shared__ volatile mp_entry localMPMain[WORK_SIZE * 2];
__shared__ volatile mp_entry localMPOther[WORK_SIZE];
__shared__ volatile bool updated[3];
int a = ((blockIdx.x * numDevices) + startPos) * blockDim.x + threadIdx.x;
//const int b = ((blockIdx.x * numDevices) + startPos + 1) * blockDim.x;
int exclusion = m / 4;
double workspace;
int localX = threadIdx.x + 1;
int localY = 1;
int chunkIdxMain = a / blockDim.x;
int chunkIdxOther = 0;
int mainStart = blockDim.x * chunkIdxMain;
int otherStart = 0;
if(a < n){
workspace = QT[a];
}else{
workspace = -1;
}
//Initialize Shared Data
if(mainStart+threadIdx.x < n){
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}else{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n){
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}else{
localMPMain[blockDim.x + threadIdx.x].floats[0] = FLT_MAX;
localMPMain[blockDim.x + threadIdx.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n){
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}else{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x == 0)
{
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
int x = a + 1;
int y = 1;
while(mainStart < n && otherStart < n)
{
__syncthreads();
//Update to the end of the current chunk
while(x < n && y < n && localY < blockDim.x)
{
workspace = workspace - Ta[x - 1] * Tb[y - 1] + Ta[x + m - 1] * Tb[ y + m - 1];
if(x / instanceLength != y / instanceLength && x % instanceLength + m <= instanceLength && y % instanceLength + m <= instanceLength )
{
//Compute the next distance value
double dist = sqrt(abs(2 * (m - (workspace - m * means[x] * means[y]) / (stds[x] * stds[y]))));
//Check cache to see if we even need to try to update
if(localMPMain[localX].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPMain[localX], dist, y);
if(localX < blockDim.x && !updated[0]){
updated[0] = true;
}else if(!updated[1]){
updated[1] = true;
}
}
//Check cache to see if we even need to try to update
if(localMPOther[localY].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPOther[localY], dist, x);
if(!updated[2]){
updated[2] = true;
}
}
}
++x;
++y;
++localX;
++localY;
}
__syncthreads();
//If we updated any values in the cached MP, try to push them to the global "master" MP
if(updated[0]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain, 0,n,1);
}
if(updated[1]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain + 1, blockDim.x,n,1);
}
if(updated[2]){
UpdateMPGlobal(profile, localMPOther, chunkIdxOther, 0,n,1);
}
__syncthreads();
if(threadIdx.x == 0){
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
mainStart += blockDim.x;
otherStart += blockDim.x;
//Update local cache to point to the next chunk of the MP
if(mainStart+threadIdx.x < n)
{
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}
else
{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n)
{
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}
else
{
localMPMain[threadIdx.x + blockDim.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x + blockDim.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n)
{
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}
else
{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
localY = 0;
localX = threadIdx.x;
chunkIdxMain++;
chunkIdxOther++;
}
}
//Computes the matrix profile given the sliding dot products for the first query and the precomputed data statistics
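//The value computed for 'dist' below is the z-normalized Euclidean distance rewritten in terms of the sliding dot product:
//  corr = (QT - m*mu_x*mu_y) / (m*sigma_x*sigma_y)
//  dist = sqrt(2*m*(1 - corr)) = sqrt(2*(m - (QT - m*mu_x*mu_y)/(sigma_x*sigma_y)))
//which is exactly the expression inside the loop; the abs() simply guards the sqrt against tiny negative values from round-off.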
__global__ void WavefrontUpdateSelfJoin(double* QT, double* Ta, double* Tb, double* means, double* stds, volatile unsigned long long int* profile, unsigned int m, unsigned int n, int startPos, int endPos, int numDevices){
__shared__ volatile mp_entry localMPMain[WORK_SIZE * 2];
__shared__ volatile mp_entry localMPOther[WORK_SIZE];
__shared__ volatile bool updated[3];
int a = ((blockIdx.x * numDevices) + startPos) * blockDim.x + threadIdx.x;
//const int b = ((blockIdx.x * numDevices) + startPos + 1) * blockDim.x;
int exclusion = m / 4;
double workspace;
int localX = threadIdx.x + 1;
int localY = 1;
int chunkIdxMain = a / blockDim.x;
int chunkIdxOther = 0;
int mainStart = blockDim.x * chunkIdxMain;
int otherStart = 0;
if(a < n){
workspace = QT[a];
}else{
workspace = -1;
}
//Initialize Shared Data
if(mainStart+threadIdx.x < n){
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}else{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n){
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}else{
localMPMain[blockDim.x + threadIdx.x].floats[0] = FLT_MAX;
localMPMain[blockDim.x + threadIdx.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n){
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}else{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x == 0)
{
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
int x = a + 1;
int y = 1;
while(mainStart < n && otherStart < n)
{
__syncthreads();
//Update to the end of the current chunk
while(x < n && y < n && localY < blockDim.x)
{
workspace = workspace - Ta[x - 1] * Tb[y - 1] + Ta[x + m - 1] * Tb[ y + m - 1];
if(!(x > y - exclusion && x < y + exclusion))
{
//Compute the next distance value
double dist = sqrt(abs(2 * (m - (workspace - m * means[x] * means[y]) / (stds[x] * stds[y]))));
//Check cache to see if we even need to try to update
if(localMPMain[localX].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPMain[localX], dist, y);
if(localX < blockDim.x && !updated[0]){
updated[0] = true;
}else if(!updated[1]){
updated[1] = true;
}
}
//Check cache to see if we even need to try to update
if(localMPOther[localY].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPOther[localY], dist, x);
if(!updated[2]){
updated[2] = true;
}
}
}
++x;
++y;
++localX;
++localY;
}
__syncthreads();
//If we updated any values in the cached MP, try to push them to the global "master" MP
if(updated[0]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain, 0,n, 1);
}
if(updated[1]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain + 1, blockDim.x,n, 1);
}
if(updated[2]){
UpdateMPGlobal(profile, localMPOther, chunkIdxOther, 0,n, 1);
}
__syncthreads();
if(threadIdx.x == 0){
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
mainStart += blockDim.x;
otherStart += blockDim.x;
//Update local cache to point to the next chunk of the MP
if(mainStart+threadIdx.x < n)
{
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}
else
{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(mainStart+threadIdx.x+blockDim.x < n)
{
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}
else
{
localMPMain[threadIdx.x + blockDim.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x + blockDim.x].ints[1] = 0;
}
if(otherStart+threadIdx.x < n)
{
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}
else
{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
localY = 0;
localX = threadIdx.x;
chunkIdxMain++;
chunkIdxOther++;
}
}
//Computes the matrix profile given the sliding dot products for the first query and the precomputed data statistics
//This version requires at LEAST 92KB shared memory per SM for maximum occupancy (currently this requirement is satisfied by compute capabilities 3.7, 5.2, and 6.1)
//If your GPU does not meet this requirement, try this algorithm and WavefrontUpdateSelfJoin and pick the one that gives better performance for your GPU
//The best performance for this version is achieved when using a block size of 1024 and the lowest power-of-two factor possible for the shared memory available on your GPU; for the Tesla K80 this value is 16.
//Minimum shared memory usage is 5*sizeof(double)*WORK_SIZE + X. X varies with the value of factor. X = 10*sizeof(double)*WORK_SIZE / factor
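//As a quick sanity check of the figures above (treating mp_entry as 8 bytes, the same size as a double), with WORK_SIZE = 1024 and factor = 16:
//  5*sizeof(double)*WORK_SIZE         = 5*8*1024     = 40960 bytes  (localMPMain plus the main parts of A_low/A_high/mu_x/sigma_x)
//  X = 10*sizeof(double)*WORK_SIZE/16 = 10*8*1024/16 =  5120 bytes  (the ten WORK_SIZE/factor-sized arrays)
//  total                                             ~= 46 KB per block,
//so two resident 1024-thread blocks per SM need roughly 92KB of shared memory, which matches the occupancy figure above.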
__global__ void WavefrontUpdateSelfJoinMaxSharedMem(const double* QT, const double* Ta, const double* Tb, const double* means, const double* stds, unsigned long long int* profile, unsigned int m, unsigned int n, int startPos, int endPos, int numDevices){
//Factor and threads per block must both be powers of two where: factor <= threads per block
//Use the smallest power of 2 possible for your GPU
const int factor = 16;
__shared__ volatile mp_entry localMPMain[WORK_SIZE + WORK_SIZE / factor];
__shared__ volatile mp_entry localMPOther[WORK_SIZE / factor];
__shared__ double A_low[WORK_SIZE + WORK_SIZE / factor];
__shared__ double A_high[WORK_SIZE + WORK_SIZE / factor];
__shared__ double mu_x[WORK_SIZE + WORK_SIZE / factor];
__shared__ double mu_y[WORK_SIZE / factor];
__shared__ double sigma_x[WORK_SIZE + WORK_SIZE / factor];
__shared__ double sigma_y[WORK_SIZE / factor];
__shared__ double B_high[WORK_SIZE / factor];
__shared__ double B_low[WORK_SIZE / factor];
__shared__ volatile bool updated[3];
int a = ((blockIdx.x * numDevices) + startPos) * blockDim.x + threadIdx.x;
//const int b = ((blockIdx.x * numDevices) + startPos + 1) * blockDim.x;
int exclusion = m / 4;
double workspace;
int localX = threadIdx.x + 1;
int localY = 1;
int chunkIdxMain = (a / blockDim.x) *factor;
int chunkIdxOther = 0;
int mainStart = (blockDim.x / factor) * chunkIdxMain;
int otherStart = 0;
if(a < n){
workspace = QT[a];
}else{
workspace = -1;
}
//Initialize Shared Data
if(mainStart+threadIdx.x < n){
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}else{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && mainStart+threadIdx.x+blockDim.x < n){
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}else if( threadIdx.x < blockDim.x / factor){
localMPMain[blockDim.x + threadIdx.x].floats[0] = FLT_MAX;
localMPMain[blockDim.x + threadIdx.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && otherStart+threadIdx.x < n){
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}else if(threadIdx.x < blockDim.x / factor){
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x == 0)
{
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
int x = a + 1;
int y = 1;
if(x - 1 < n){
A_low[threadIdx.x] = Ta[x - 1];
}
if(x + m - 1 < n + m - 1){
A_high[threadIdx.x] = Ta[x + m - 1];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x - 1 < n + m - 1){
A_low[threadIdx.x + blockDim.x] = Ta[x + blockDim.x - 1];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x - 1 + m < n + m - 1){
A_high[threadIdx.x + blockDim.x] = Ta[x + blockDim.x - 1 + m];
}
if(a < n){
sigma_x[threadIdx.x] = stds[a];
mu_x[threadIdx.x] = means[a];
}
if(threadIdx.x < blockDim.x / factor && a + blockDim.x < n){
sigma_x[threadIdx.x + blockDim.x] = stds[a + blockDim.x];
mu_x[threadIdx.x + blockDim.x] = means[a + blockDim.x];
}
if(threadIdx.x < blockDim.x / factor){
B_low[threadIdx.x] = Tb[threadIdx.x];
B_high[threadIdx.x] = Tb[threadIdx.x + m];
sigma_y[threadIdx.x] = stds[threadIdx.x];
mu_y[threadIdx.x] = means[threadIdx.x];
}
int relativeX = threadIdx.x;
int relativeY = 0;
while(mainStart < n && otherStart < n)
{
__syncthreads();
//Update to the end of the current chunk
while(x < n && y < n && localY < blockDim.x / factor)
{
//workspace = workspace - Ta[x - 1] * Tb[y - 1] + Ta[x + m - 1] * Tb[ y + m - 1];
workspace = workspace - A_low[relativeX]* B_low[relativeY] + A_high[relativeX] * B_high[relativeY];
//workspace = workspace - Ta[x - 1] * B_low[relativeY] + Ta[x + m - 1] * B_high[relativeY];
//workspace = workspace - A_low[relativeX] * B_low[relativeY] + Ta[x + m - 1] * B_high[relativeY];
//workspace = workspace - Ta[x - 1] * B_low[relativeY] + A_high[relativeX] * B_high[relativeY];
if(!(x > y - exclusion && x < y + exclusion))
{
//Compute the next distance value
//double dist = sqrt(abs(2 * (m - (workspace - m * means[x] * means[y]) / (stds[x] * stds[y]))));
double dist = sqrt(abs(2 * (m - (workspace - m * mu_x[localX] * mu_y[localY]) / (sigma_x[localX] * sigma_y[localY]))));
//Check cache to see if we even need to try to update
if(localMPMain[localX].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPMain[localX], dist, y);
if(localX < blockDim.x && !updated[0]){
updated[0] = true;
}else if(!updated[1]){
updated[1] = true;
}
}
//Check cache to see if we even need to try to update
if(localMPOther[localY].floats[0] > dist)
{
//Update the cache with the new min value atomically
MPatomicMin((unsigned long long int*)&localMPOther[localY], dist, x);
if(!updated[2]){
updated[2] = true;
}
}
}
++x;
++y;
++localX;
++localY;
++relativeX;
++relativeY;
}
__syncthreads();
//If we updated any values in the cached MP, try to push them to the global "master" MP
if(updated[0]){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain, 0,n, factor);
}
if(updated[1]){
if(threadIdx.x < blockDim.x / factor){
UpdateMPGlobal(profile, localMPMain, chunkIdxMain + factor, blockDim.x, n, factor);
}
}
if(updated[2]){
if(threadIdx.x < blockDim.x / factor){
UpdateMPGlobal(profile, localMPOther, chunkIdxOther, 0,n, factor);
}
}
__syncthreads();
if(threadIdx.x == 0){
updated[0] = false;
updated[1] = false;
updated[2] = false;
}
mainStart += blockDim.x / factor;
otherStart += blockDim.x / factor;
//Update local cache to point to the next chunk of the MP
if(mainStart+threadIdx.x < n)
{
localMPMain[threadIdx.x].ulong = profile[mainStart + threadIdx.x];
}
else
{
localMPMain[threadIdx.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && mainStart+threadIdx.x+blockDim.x < n)
{
localMPMain[blockDim.x + threadIdx.x].ulong = profile[mainStart + blockDim.x + threadIdx.x];
}
else if( threadIdx.x < blockDim.x / factor)
{
localMPMain[threadIdx.x + blockDim.x].floats[0] = FLT_MAX;
localMPMain[threadIdx.x + blockDim.x].ints[1] = 0;
}
if(threadIdx.x < blockDim.x / factor && otherStart+threadIdx.x < n)
{
localMPOther[threadIdx.x].ulong = profile[otherStart + threadIdx.x];
}
else if( threadIdx.x < blockDim.x / factor)
{
localMPOther[threadIdx.x].floats[0] = FLT_MAX;
localMPOther[threadIdx.x].ints[1] = 0;
}
if(x - 1 < n + m - 1){
A_low[threadIdx.x] = Ta[x - 1];
}
if(threadIdx.x < blockDim.x / factor && x - 1 + blockDim.x < n +m - 1){
A_low[threadIdx.x + blockDim.x] = Ta[x + blockDim.x - 1];
}
if(x + m - 1 < n + m - 1){
A_high[threadIdx.x] = Ta[x + m - 1];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x + m - 1 < n + m - 1){
A_high[threadIdx.x + blockDim.x] = Ta[x + blockDim.x + m - 1];
}
if(threadIdx.x < blockDim.x / factor && y + threadIdx.x - 1 < n + m - 1){
B_low[threadIdx.x] = Tb[y + threadIdx.x - 1];
}
if(threadIdx.x < blockDim.x / factor && y + threadIdx.x - 1 + m < n + m - 1){
B_high[threadIdx.x] = Tb[y + threadIdx.x + m - 1];
}
if(x < n){
sigma_x[threadIdx.x] = stds[x];
mu_x[threadIdx.x] = means[x];
}
if(threadIdx.x < blockDim.x / factor && x + blockDim.x < n){
sigma_x[threadIdx.x + blockDim.x] = stds[x + blockDim.x];
mu_x[threadIdx.x + blockDim.x] = means[x + blockDim.x];
}
if(threadIdx.x < blockDim.x / factor && y + threadIdx.x < n){
sigma_y[threadIdx.x] = stds[y + threadIdx.x];
mu_y[threadIdx.x] = means[y + threadIdx.x];
}
relativeY = 0;
localY = 0;
localX = threadIdx.x;
relativeX = threadIdx.x;
chunkIdxMain++;
chunkIdxOther++;
}
}
//Performs STOMP algorithm
#ifdef _WIN32
DWORD WINAPI doThreadSTOMP(LPVOID argsp){
#else
void* doThreadSTOMP(void* argsp) {
#endif
thread_args* args = (thread_args*) argsp;
int tid = args->tid;
gpuErrchk(cudaSetDevice(tid % nDevices));
thrust::device_vector<DATA_TYPE>* Ta = args -> Ta;
thrust::device_vector<DATA_TYPE>* Tb = args -> Tb;
thrust::device_vector<unsigned long long int>* profile = args -> profile;
thrust::device_vector<unsigned int>* profileIdxs = args -> profileIdxs;
int numWorkers = args ->numWorkers;
int m = args -> m;
unsigned int start = args -> start;
unsigned int end = args -> end;
unsigned int n = Ta ->size() - m + 1;
unsigned int n2 = Tb -> size() - m + 1;
unsigned int sz = Ta -> size();
thrust::plus<DATA_TYPE> op1;
square op2;
printf("allocating grids\n");
dim3 grid(Ta -> size() / WORK_SIZE + 1, 1, 1);
dim3 grid3(n / WORK_SIZE + 1, 1, 1);
dim3 block(WORK_SIZE, 1, 1);
double oldTime = 0;
thrust::device_vector<DATA_TYPE> Qb(m);
gpuErrchk( cudaPeekAtLastError() );
thrust::copy(thrust::cuda::par,Tb -> begin(), Tb -> begin() + m, Qb.begin());
gpuErrchk( cudaPeekAtLastError() );
thrust::device_vector<DATA_TYPE> QT;
SlidingDotProducts(Qb, *Ta, QT, plan[tid], plan2[tid]);
thrust::device_vector<DATA_TYPE> QTtrunc(n);
gpuErrchk( cudaPeekAtLastError() );
thrust::copy(thrust::cuda::par,QT.begin() + m - 1, QT.begin() + m + n - 1, QTtrunc.begin());
gpuErrchk( cudaPeekAtLastError() );
QT.clear();
QT.shrink_to_fit();
thrust::device_vector<DATA_TYPE> Means(n), stds(n), squares(Ta -> size()), sums(Ta -> size());
thrust::inclusive_scan(Ta -> begin(),Ta -> end(),sums.begin(), op1);
thrust::transform_inclusive_scan(Ta -> begin(), Ta -> end(), squares.begin(), op2,op1);
slidingMean<<<grid, block>>>(sums.data().get(),m, n, Means.data().get());
gpuErrchk( cudaPeekAtLastError() );
slidingStd<<<grid, block>>>(squares.data().get(), m, n, Means.data().get(), stds.data().get());
gpuErrchk( cudaPeekAtLastError() );
sums.clear();
squares.clear();
sums.shrink_to_fit();
squares.shrink_to_fit();
printf("allocating DP");
thrust::device_vector<DATA_TYPE> D;
D.resize(n,_MAX_VAL_);
CalculateDistProfile<<<grid, block>>>(QTtrunc.data().get(), D.data().get(), Means.data().get(), stds.data().get(), m, 0, n);
gpuErrchk( cudaPeekAtLastError() );
//Initialize the indexes to the starting position
profileIdxs -> resize(n,1);
profile->resize(n, 0);
thrust::device_vector<double>::iterator it = thrust::min_element(D.begin(),D.end());
unsigned int pos = it - D.begin();
double val = *it;
//cout << pos << " " << val;
(*profileIdxs)[0] = pos;
D[0] = *it;
thrust::transform(D.begin(), D.end(), profileIdxs->begin(), profile->begin(), MPIDXCombine());
D.clear();
D.shrink_to_fit();
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
time_t start2, now2;
time_t lastLogged;
time(&start2);
time(&lastLogged);
#ifdef USE_BEST_VERSION
WavefrontUpdateSelfJoinMaxSharedMem<<<dim3(ceil(numWorkers / (double) WORK_SIZE), 1, 1),dim3(WORK_SIZE, 1,1)>>>(QTtrunc.data().get(), Ta -> data().get(), Tb -> data().get(), Means.data().get(), stds.data().get(), profile -> data().get(), m, n, start, end, NUM_THREADS);
#else
WavefrontUpdateSelfJoin<<<dim3(ceil(numWorkers / (double) WORK_SIZE), 1, 1),dim3(WORK_SIZE, 1,1)>>>(QTtrunc.data().get(), Ta -> data().get(), Tb -> data().get(), Means.data().get(), stds.data().get(), profile -> data().get(), m, n, start, end, NUM_THREADS);
#endif
gpuErrchk( cudaPeekAtLastError() );
cudaDeviceSynchronize();
//std::cout << thrust::reduce(counts.begin(), counts.end(), 0, thrust::plus<unsigned long long>()) << std::endl;
time_t now3;
time(&now3);
printf("Finished thread %d over all iterations in %lf seconds\n", tid, difftime(now3, start2) + oldTime);
//pthread_exit(0);
return 0;
}
//Allocates threads on a CPU to distribute work to each specified device
__host__ void STOMP(thrust::host_vector<DATA_TYPE>& Ta, unsigned int m,
thrust::host_vector<float>& profile_h, thrust::host_vector<unsigned int>& profileIdxs_h){
gpuErrchk(cudaGetDeviceCount(&nDevices));
STOMPinit(Ta.size());
thrust::device_vector<DATA_TYPE>* Ta_d = new thrust::device_vector<DATA_TYPE>[nDevices];
thrust::device_vector<unsigned long long int>* Profs[NUM_THREADS];
thrust::device_vector<unsigned int>* ProfsIdxs[NUM_THREADS];
for(int i = 0; i < nDevices; ++i){
gpuErrchk(cudaSetDevice(i));
Ta_d[i] = Ta;
}
for(int i = 0; i < NUM_THREADS; ++i){
gpuErrchk(cudaSetDevice(i % nDevices));
Profs[i] = new thrust::device_vector<unsigned long long int>();
ProfsIdxs[i] = new thrust::device_vector<unsigned int>();
}
gpuErrchk(cudaSetDevice(0));
unsigned int n = Ta.size() - m + 1;
unsigned int lastend=0;
for(unsigned int i = 0; i < NUM_THREADS; ++i ){
lastend += ceil(n / (double) NUM_THREADS);
if(lastend > n){
lastend = n;
}
int workers = ceil(n / (double) NUM_THREADS);
std::cout << workers<< std::endl;
int tid = i;
targs[tid].Ta = &Ta_d[i % nDevices];
targs[tid].Tb = &Ta_d[i % nDevices];
targs[tid].tid = tid;
targs[tid].profile = Profs[tid];
targs[tid].profileIdxs = ProfsIdxs[tid];
targs[tid].m = m;
targs[tid].start = i;
targs[tid].numWorkers = ceil(n / (double) NUM_THREADS);
//lastend = n-floor(n*sqrt(double(NUM_THREADS-i-1)/double(NUM_THREADS-i)));
printf("val:%lf\n", sqrt(double(NUM_THREADS-i-1)/double(NUM_THREADS-i)));
targs[tid].end = n;
targs[tid].exclusion = m / 4;
targs[tid].maxJoin = 0;
printf("Launching thread %d, for start = %d, to end = %d\n", tid, targs[tid].start, targs[tid].end);
#ifdef _WIN32
threads[tid] = CreateThread(NULL, 0, doThreadSTOMP, (void*)&targs[tid], 0, NULL);
#else
int rc = pthread_create(&threads[tid], NULL, doThreadSTOMP, (void*)&targs[tid]);
#endif // _WIN32
++tid;
}
for(int x = 0; x < NUM_THREADS; x++)
#ifdef _WIN32
WaitForMultipleObjects(NUM_THREADS, threads, TRUE, INFINITE);
#else
pthread_join(threads[x], NULL);
#endif
gpuErrchk(cudaSetDevice(0));
thrust::device_vector<float> profile(Ta.size() - m + 1, FLT_MAX);
thrust::device_vector<unsigned int> profileIdxs(Ta.size() - m + 1, 0);
//Move all pieces back to the same GPU to aggregate
//TODO:(This can be split into steps in the case we are using a massive number of GPUs)
for(int i = 0; i < NUM_THREADS; ++i)
{
if(i % nDevices != 0)
{
gpuErrchk(cudaSetDevice(i % nDevices));
thrust::host_vector<unsigned long long int> temp = *Profs[i];
delete Profs[i];
delete ProfsIdxs[i];
gpuErrchk(cudaSetDevice(0));
Profs[i] = new thrust::device_vector<unsigned long long int>(temp);
gpuErrchk( cudaPeekAtLastError() );
ProfsIdxs[i] = new thrust::device_vector<unsigned int>();
gpuErrchk( cudaPeekAtLastError() );
}
}
//Compute final distance profile (Aggregate what each thread produced)
for(int i = 0; i < NUM_THREADS; ++i){
int curstart=0;
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(profile.begin(), profileIdxs.begin(), Profs[i] -> begin())), thrust::make_zip_iterator(thrust::make_tuple(profile.end(), profileIdxs.end(), Profs[i] -> end())), minWithIndex2());
gpuErrchk( cudaPeekAtLastError() );
}
for(int i = 0; i < NUM_THREADS; ++i){
delete Profs[i];
delete ProfsIdxs[i];
}
delete [] Ta_d;
profile_h = profile;
profileIdxs_h = profileIdxs;
}
int main(int argc, char** argv) {
int window_size = atoi(argv[1]);
thrust::host_vector<DATA_TYPE> Th;
readFile(argv[2], Th);
//thrust::device_vector<DATA_TYPE> T;
//T = Th;
int size = Th.size();
thrust::host_vector<float> profile;
thrust::host_vector<unsigned int> profIdxs;
printf("Starting STOMP\n");
time_t now;
time(&START);
STOMP(Th,window_size,profile, profIdxs);
time(&now);
printf("Finished STOMP on %u data points in %f seconds.\n", size, difftime(now, START));
printf("Now writing result to files\n");
FILE* f1 = fopen( argv[3], "w");
FILE* f2 = fopen( argv[4], "w");
for(int i = 0; i < profIdxs.size(); ++i){
fprintf(f1, format_str_n, profile[i]);
fprintf(f2, "%u\n", profIdxs[i] + 1);
}
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaDeviceReset());
fclose(f1);
fclose(f2);
printf("Done\n");
return 0;
}
|
1873ba04a24ea48fa362b912c208134d3689c919.hip | // !!! This is a file automatically generated by hipify!!!
// C++ headers
#include <algorithm>
#include <numeric>
// CUDA runtime
#include <hip/hip_runtime.h>
// CMSSW headers
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "PixelRecHitGPUKernel.h"
#include "gpuPixelRecHits.h"
namespace {
__global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
uint32_t* hitsLayerStart) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
assert(0 == hitsModuleStart[0]);
if (i < 11) {
hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]];
#ifdef GPU_DEBUG
printf("LayerStart %d %d: %d\n", i, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]);
#endif
}
}
} // namespace
namespace pixelgpudetails {
TrackingRecHit2DGPU PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d,
SiPixelClustersCUDA const& clusters_d,
BeamSpotCUDA const& bs_d,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
hipStream_t stream) const {
auto nHits = clusters_d.nClusters();
TrackingRecHit2DGPU hits_d(nHits, clusters_d.offsetBPIX2(), cpeParams, clusters_d.clusModuleStart(), stream);
int threadsPerBlock = 128;
int blocks = digis_d.nModules(); // active modules (with digis)
#ifdef GPU_DEBUG
std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl;
#endif
// protect from empty events
if (blocks) {
hipLaunchKernelGGL(( gpuPixelRecHits::getHits), dim3(blocks), dim3(threadsPerBlock), 0, stream,
cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
cudaCheck(hipDeviceSynchronize());
#endif
}
// assuming full warp of threads is better than a smaller number...
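// (setHitsLayerStart writes only 11 entries -- presumably one hit offset per pixel layer/disk plus the final end
// marker, matching the 10 layers used for the phi binner below -- so a single 32-thread block is plenty)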
if (nHits) {
hipLaunchKernelGGL(( setHitsLayerStart), dim3(1), dim3(32), 0, stream, clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart());
cudaCheck(hipGetLastError());
cms::cuda::fillManyFromVector(
hits_d.phiBinner(), 10, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, hits_d.phiBinnerStorage(), stream);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
cudaCheck(hipDeviceSynchronize());
#endif
}
return hits_d;
}
} // namespace pixelgpudetails
| 1873ba04a24ea48fa362b912c208134d3689c919.cu | // C++ headers
#include <algorithm>
#include <numeric>
// CUDA runtime
#include <cuda_runtime.h>
// CMSSW headers
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "PixelRecHitGPUKernel.h"
#include "gpuPixelRecHits.h"
namespace {
__global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
uint32_t* hitsLayerStart) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
assert(0 == hitsModuleStart[0]);
if (i < 11) {
hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]];
#ifdef GPU_DEBUG
printf("LayerStart %d %d: %d\n", i, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]);
#endif
}
}
} // namespace
namespace pixelgpudetails {
TrackingRecHit2DGPU PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d,
SiPixelClustersCUDA const& clusters_d,
BeamSpotCUDA const& bs_d,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
cudaStream_t stream) const {
auto nHits = clusters_d.nClusters();
TrackingRecHit2DGPU hits_d(nHits, clusters_d.offsetBPIX2(), cpeParams, clusters_d.clusModuleStart(), stream);
int threadsPerBlock = 128;
int blocks = digis_d.nModules(); // active modules (with digis)
#ifdef GPU_DEBUG
std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl;
#endif
// protect from empty events
if (blocks) {
gpuPixelRecHits::getHits<<<blocks, threadsPerBlock, 0, stream>>>(
cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaCheck(cudaDeviceSynchronize());
#endif
}
// assuming full warp of threads is better than a smaller number...
if (nHits) {
setHitsLayerStart<<<1, 32, 0, stream>>>(clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart());
cudaCheck(cudaGetLastError());
cms::cuda::fillManyFromVector(
hits_d.phiBinner(), 10, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, hits_d.phiBinnerStorage(), stream);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaCheck(cudaDeviceSynchronize());
#endif
}
return hits_d;
}
} // namespace pixelgpudetails
|
fb1c6b229932188778128861004fc719d3f3b5e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// O(N) operations
#include <stdio.h>
#include <iostream>
using namespace std;
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
const int BS = 1 << 5;
const int N = 1 << 10;
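//The kernels below follow the Blelloch work-efficient scan: up_sweep builds partial sums up a binary tree in place,
//assign_zero clears the root, and down_sweep walks back down the tree to produce an exclusive prefix sum.
//A small worked example on one 8-element block (strides 1, 2, 4):
//  input:       [1 2 3 4 5 6 7 8]
//  up-sweep:    [1 3 3 10 5 11 7 36]   (each pass adds a left subtree sum into its right sibling)
//  clear root:  [1 3 3 10 5 11 7 0]
//  down-sweep:  [0 1 3 6 10 15 21 28]  (exclusive prefix sums)
//Here the sweeps are launched once per group of tree levels, with 'chunk' as the starting stride, intended to let
//partial sums be combined across blocks; note that the assign_zero and down_sweep calls are currently commented out in main().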
__global__ void up_sweep(float *g_idata, const int chunk)
{
int threadId = threadIdx.x;
int entireId = blockIdx.x * blockDim.x + threadIdx.x;
int offset = chunk;
for (int d = blockDim.x >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (threadId < d)
{
// B
int ai = offset * (2 * (entireId / blockDim.x * d + threadId) + 1) - 1;
int bi = offset * (2 * (entireId / blockDim.x * d + threadId) + 2) - 1;
/*
if (chunk > 512)
{
printf("##%d: %d <- %d (%d <- %d)\n", entireId, 0, 0, bi, ai // (int)g_idata[bi], (int)g_idata[ai], bi, ai);
}
*/
if (!((0 <= ai && ai <= N) && (0 <= bi && bi <= N)))
continue;
/*
if (chunk >= 512)
{
printf("#%d: %lld <- %lld (%d <- %d)\n", entireId, (long long)g_idata[bi], (long long)g_idata[ai], bi, ai);
}
*/
g_idata[bi] += g_idata[ai];
}
offset <<= 1;
}
}
__global__ void assign_zero(float *g_idata, const long long pow2)
{
// C
g_idata[pow2 - 1] = 0; // clear the last element
}
__global__ void down_sweep(float *g_idata, const int chunk)
{
int threadId = threadIdx.x;
int entireId = blockIdx.x * blockDim.x + threadIdx.x;
int offset = chunk;
for (int d = blockDim.x >> 1; d > 0; d >>= 1)
offset <<= 1;
for (int d = 1; d < blockDim.x; d <<= 1) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (threadId < d)
{
// D
int ai = offset * (2 * (entireId / blockDim.x * d + threadId) + 1) - 1;
int bi = offset * (2 * (entireId / blockDim.x * d + threadId) + 2) - 1;
if (!((0 <= ai && ai <= N) && (0 <= bi && bi <= N)))
continue;
float t = g_idata[ai];
g_idata[ai] = g_idata[bi];
g_idata[bi] += t;
//printf("#%d: %d <-> %d (%d <-> %d)\n", entireId, (int)g_idata[bi], (int)g_idata[ai], bi, ai);
}
}
__syncthreads();
}
int main()
{
long long pow2 = 1;
while (pow2 < N)
pow2 <<= 1;
float *v;
v = (float *)malloc(sizeof(float) * pow2);
for (int i = 0; i < N; i++)
v[i] = i + 1;
float *g_idata;
hipMalloc((void **)&g_idata, sizeof(float) * pow2), hipMemcpy(g_idata, v, sizeof(float) * pow2, hipMemcpyDefault);
{ // calc
long long chunkSize;
for (chunkSize = 1; chunkSize < pow2; chunkSize *= BS)
{
hipLaunchKernelGGL(( up_sweep), dim3((pow2 + BS - 1) / BS), dim3(BS), 0, 0, g_idata, chunkSize);
CHECK(hipDeviceSynchronize());
//cout << "#" << chunkSize << endl;
}
/*
assign_zero<<<1, 1>>>(g_idata, pow2);
CHECK(hipDeviceSynchronize());
for (chunkSize /= BS; chunkSize > 0; chunkSize /= BS)
{
down_sweep<<<(pow2 + BS - 1) / BS, BS>>>(g_idata, chunkSize);
CHECK(hipDeviceSynchronize());
}
*/
}
float *res;
res = (float *)malloc(sizeof(float) * pow2);
hipMemcpy(res, g_idata, sizeof(float) * pow2, hipMemcpyDefault);
for (int i = N - 10; i < N; i++)
{
cout << i << " " << (long long)res[i] << "\n";
}
cout << flush;
}
| fb1c6b229932188778128861004fc719d3f3b5e6.cu | // O(N) operations
#include <stdio.h>
#include <iostream>
using namespace std;
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
const int BS = 1 << 5;
const int N = 1 << 10;
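//The kernels below follow the Blelloch work-efficient scan: up_sweep builds partial sums up a binary tree in place,
//assign_zero clears the root, and down_sweep walks back down the tree to produce an exclusive prefix sum.
//A small worked example on one 8-element block (strides 1, 2, 4):
//  input:       [1 2 3 4 5 6 7 8]
//  up-sweep:    [1 3 3 10 5 11 7 36]   (each pass adds a left subtree sum into its right sibling)
//  clear root:  [1 3 3 10 5 11 7 0]
//  down-sweep:  [0 1 3 6 10 15 21 28]  (exclusive prefix sums)
//Here the sweeps are launched once per group of tree levels, with 'chunk' as the starting stride, intended to let
//partial sums be combined across blocks; note that the assign_zero and down_sweep calls are currently commented out in main().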
__global__ void up_sweep(float *g_idata, const int chunk)
{
int threadId = threadIdx.x;
int entireId = blockIdx.x * blockDim.x + threadIdx.x;
int offset = chunk;
for (int d = blockDim.x >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (threadId < d)
{
// B
int ai = offset * (2 * (entireId / blockDim.x * d + threadId) + 1) - 1;
int bi = offset * (2 * (entireId / blockDim.x * d + threadId) + 2) - 1;
/*
if (chunk > 512)
{
printf("##%d: %d <- %d (%d <- %d)\n", entireId, 0, 0, bi, ai // (int)g_idata[bi], (int)g_idata[ai], bi, ai);
}
*/
if (!((0 <= ai && ai <= N) && (0 <= bi && bi <= N)))
continue;
/*
if (chunk >= 512)
{
printf("#%d: %lld <- %lld (%d <- %d)\n", entireId, (long long)g_idata[bi], (long long)g_idata[ai], bi, ai);
}
*/
g_idata[bi] += g_idata[ai];
}
offset <<= 1;
}
}
__global__ void assign_zero(float *g_idata, const long long pow2)
{
// C
g_idata[pow2 - 1] = 0; // clear the last element
}
__global__ void down_sweep(float *g_idata, const int chunk)
{
int threadId = threadIdx.x;
int entireId = blockIdx.x * blockDim.x + threadIdx.x;
int offset = chunk;
for (int d = blockDim.x >> 1; d > 0; d >>= 1)
offset <<= 1;
for (int d = 1; d < blockDim.x; d <<= 1) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (threadId < d)
{
// D
int ai = offset * (2 * (entireId / blockDim.x * d + threadId) + 1) - 1;
int bi = offset * (2 * (entireId / blockDim.x * d + threadId) + 2) - 1;
if (!((0 <= ai && ai <= N) && (0 <= bi && bi <= N)))
continue;
float t = g_idata[ai];
g_idata[ai] = g_idata[bi];
g_idata[bi] += t;
//printf("#%d: %d <-> %d (%d <-> %d)\n", entireId, (int)g_idata[bi], (int)g_idata[ai], bi, ai);
}
}
__syncthreads();
}
int main()
{
long long pow2 = 1;
while (pow2 < N)
pow2 <<= 1;
float *v;
v = (float *)malloc(sizeof(float) * pow2);
for (int i = 0; i < N; i++)
v[i] = i + 1;
float *g_idata;
cudaMalloc((void **)&g_idata, sizeof(float) * pow2), cudaMemcpy(g_idata, v, sizeof(float) * pow2, cudaMemcpyDefault);
{ // calc
long long chunkSize;
for (chunkSize = 1; chunkSize < pow2; chunkSize *= BS)
{
up_sweep<<<(pow2 + BS - 1) / BS, BS>>>(g_idata, chunkSize);
CHECK(cudaDeviceSynchronize());
//cout << "#" << chunkSize << endl;
}
/*
assign_zero<<<1, 1>>>(g_idata, pow2);
CHECK(cudaDeviceSynchronize());
for (chunkSize /= BS; chunkSize > 0; chunkSize /= BS)
{
down_sweep<<<(pow2 + BS - 1) / BS, BS>>>(g_idata, chunkSize);
CHECK(cudaDeviceSynchronize());
}
*/
}
float *res;
res = (float *)malloc(sizeof(float) * pow2);
cudaMemcpy(res, g_idata, sizeof(float) * pow2, cudaMemcpyDefault);
for (int i = N - 10; i < N; i++)
{
cout << i << " " << (long long)res[i] << "\n";
}
cout << flush;
}
|
4271aa38a0829c1b249df504f344cce0acde3347.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mult.h"
node* mult(node *p, node *u, int n)
{
node *a = p->next, *b = u->next, *d;
node *t = new node;
t->next = NULL;
d = t;
node *y = new node;
/*node *r = new node;
r->zhi.clear();
r->xi.clear();
for (int i = 0; i < n; i++) {
r->zhi.insert(r->zhi.end(), 0);
}
r->next = NULL;
y->next = r;*/
y->next = NULL;
while (a != NULL)
{
b = u->next;
d = t;
while (b != NULL)
{
node *c = new node;
c->zhi.clear();
for (int i = 0; i<n; i++) {
c->zhi.insert(c->zhi.end(), (a->zhi[i] + b->zhi[i]));
}
c->xi = multi(a->xi, b->xi);
c->next = NULL;
d->next = c;
d = c;
b = b->next;
}
y = sum(y, t, n);
a = a->next;
}
return y;
}
__global__ void ConvertToInt(hipfftReal *a, int size)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int* b = (int*)a;
for (int i = threadID; i < size; i += numThreads)
b[i] = static_cast<int>(round(a[i]));
}
vector<float> multi(vector<float> aa, vector<float> bb) {
int lena = aa.size();
int lenb = bb.size();
float aflag = aa[0];
float bflag = bb[0];
// cout << "aflag:" << aflag << ",bflag:" << bflag << endl;
if (aflag < 0)aa[0] = -aa[0];
if (bflag < 0)bb[0] = -bb[0];
vector<float> a(lenb);
vector<float> b(lena);
//doing many inserts at the front of a vector severely hurts performance, so the padded vectors are built by appending at the end
a.insert(a.end(), aa.begin(), aa.end());
b.insert(b.end(), bb.begin(), bb.end());
//printf(".....................do multiply............................\n");
//set the base (radix)
vector<int> c = multiply(a, b);
const int base = 10;
//handle the carries
for (int j = c.size() - 1; j > 0; j--)
{
if (c[j] >= base)
{
c[j - 1] += c[j] / base;
c[j] %= base;
}
}
//remove the extra zero
c.pop_back();
int i = 0;
if (c[0] == 0) { i++; }
if ((aflag*bflag) < 0)c[0] = -c[0];
vector<float> result;
result.insert(result.end(), c.begin(), c.end());
//print the final result; if the base changes, the output format must change too, e.g. "%02d" for base 100 and "%03d" for base 1000
return result;
}
vector<int> multiply(const vector<float> &a, const vector<float> &b)
{
//transform to the frequency domain, then multiply pointwise
const int NX = a.size();
hipfftHandle plan_a, plan_b, plan_c;
hipfftComplex *data_a, *data_b;
vector<int> c(a.size());
//allocate and initialize device memory; assumes sizeof(int)==sizeof(float) and sizeof(hipfftComplex)==2*sizeof(float)
hipMalloc((void**)&data_a, sizeof(hipfftComplex) * (NX / 2 + 1) * BATCH);
hipMalloc((void**)&data_b, sizeof(hipfftComplex) * (NX / 2 + 1) * BATCH);
hipMemcpy(data_a, a.data(), sizeof(float) * a.size(), hipMemcpyHostToDevice);
hipMemcpy(data_b, b.data(), sizeof(float) * b.size(), hipMemcpyHostToDevice);
if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return c; }
if (hipfftPlan1d(&plan_a, NX, HIPFFT_R2C, BATCH) != HIPFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
if (hipfftPlan1d(&plan_b, NX, HIPFFT_R2C, BATCH) != HIPFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
if (hipfftPlan1d(&plan_c, NX, HIPFFT_C2R, BATCH) != HIPFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
//transform A(x) to the frequency domain
if (hipfftExecR2C(plan_a, (hipfftReal*)data_a, data_a) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return c;
}
//transform B(x) to the frequency domain
if (hipfftExecR2C(plan_b, (hipfftReal*)data_b, data_b) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return c;
}
//pointwise multiplication
ComplexPointwiseMulAndScale << <NX / 256 + 1, 256 >> >(data_a, data_b, NX);
//transform C(x) back to the time domain
if (hipfftExecC2R(plan_c, data_b, (hipfftReal*)data_b) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2R Forward failed");
return c;
}
//convert the floating-point results to integers
ConvertToInt << <NX / 256 + 1, 256 >> >((hipfftReal*)data_b, NX);
if (hipDeviceSynchronize() != hipSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return c;
}
hipMemcpy(&c[0], data_b, sizeof(float) * b.size(), hipMemcpyDeviceToHost);
hipfftDestroy(plan_a);
hipfftDestroy(plan_b);
hipfftDestroy(plan_c);
hipFree(data_a);
hipFree(data_b);
return c;
}
__global__ void ComplexPointwiseMulAndScale(hipfftComplex *a, hipfftComplex *b, int size)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float scale = 1.0f / (float)size;
hipfftComplex c;
for (int i = threadID; i < size; i += numThreads)
{
c = cuCmulf(a[i], b[i]);//a*b
b[i] = make_cuFloatComplex(scale*cuCrealf(c), scale*cuCimagf(c));//scale the real and imaginary parts of the complex value
}
}
| 4271aa38a0829c1b249df504f344cce0acde3347.cu | #include "mult.h"
node* mult(node *p, node *u, int n)
{
node *a = p->next, *b = u->next, *d;
node *t = new node;
t->next = NULL;
d = t;
node *y = new node;
/*node *r = new node;
r->zhi.clear();
r->xi.clear();
for (int i = 0; i < n; i++) {
r->zhi.insert(r->zhi.end(), 0);
}
r->next = NULL;
y->next = r;*/
y->next = NULL;
while (a != NULL)
{
b = u->next;
d = t;
while (b != NULL)
{
node *c = new node;
c->zhi.clear();
for (int i = 0; i<n; i++) {
c->zhi.insert(c->zhi.end(), (a->zhi[i] + b->zhi[i]));
}
c->xi = multi(a->xi, b->xi);
c->next = NULL;
d->next = c;
d = c;
b = b->next;
}
y = sum(y, t, n);
a = a->next;
}
return y;
}
__global__ void ConvertToInt(cufftReal *a, int size)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int* b = (int*)a;
for (int i = threadID; i < size; i += numThreads)
b[i] = static_cast<int>(round(a[i]));
}
vector<float> multi(vector<float> aa, vector<float> bb) {
int lena = aa.size();
int lenb = bb.size();
float aflag = aa[0];
float bflag = bb[0];
// cout << "aflag:" << aflag << ",bflag:" << bflag << endl;
if (aflag < 0)aa[0] = -aa[0];
if (bflag < 0)bb[0] = -bb[0];
vector<float> a(lenb);
vector<float> b(lena);
//doing many inserts at the front of a vector severely hurts performance, so append at the end instead
a.insert(a.end(), aa.begin(), aa.end());
b.insert(b.end(), bb.begin(), bb.end());
//printf(".....................do multiply............................\n");
//set the base (radix)
vector<int> c = multiply(a, b);
const int base = 10;
//handle the carries
for (int j = c.size() - 1; j > 0; j--)
{
if (c[j] >= base)
{
c[j - 1] += c[j] / base;
c[j] %= base;
}
}
//remove the extra zero
c.pop_back();
int i = 0;
if (c[0] == 0) { i++; }
if ((aflag*bflag) < 0)c[0] = -c[0];
vector<float> result;
result.insert(result.end(), c.begin(), c.end());
//print the final result; if the base changes, the output format must change too, e.g. "%02d" for base 100 and "%03d" for base 1000
return result;
}
vector<int> multiply(const vector<float> &a, const vector<float> &b)
{
//transform to the frequency domain, then multiply pointwise
const int NX = a.size();
cufftHandle plan_a, plan_b, plan_c;
cufftComplex *data_a, *data_b;
vector<int> c(a.size());
//allocate and initialize device memory; assumes sizeof(int)==sizeof(float) and sizeof(cufftComplex)==2*sizeof(float)
cudaMalloc((void**)&data_a, sizeof(cufftComplex) * (NX / 2 + 1) * BATCH);
cudaMalloc((void**)&data_b, sizeof(cufftComplex) * (NX / 2 + 1) * BATCH);
cudaMemcpy(data_a, a.data(), sizeof(float) * a.size(), cudaMemcpyHostToDevice);
cudaMemcpy(data_b, b.data(), sizeof(float) * b.size(), cudaMemcpyHostToDevice);
if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return c; }
if (cufftPlan1d(&plan_a, NX, CUFFT_R2C, BATCH) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
if (cufftPlan1d(&plan_b, NX, CUFFT_R2C, BATCH) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
if (cufftPlan1d(&plan_c, NX, CUFFT_C2R, BATCH) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
//transform A(x) to the frequency domain
if (cufftExecR2C(plan_a, (cufftReal*)data_a, data_a) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return c;
}
//transform B(x) to the frequency domain
if (cufftExecR2C(plan_b, (cufftReal*)data_b, data_b) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return c;
}
//pointwise multiplication
ComplexPointwiseMulAndScale << <NX / 256 + 1, 256 >> >(data_a, data_b, NX);
//transform C(x) back to the time domain
if (cufftExecC2R(plan_c, data_b, (cufftReal*)data_b) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2R Forward failed");
return c;
}
//convert the floating-point results to integers
ConvertToInt << <NX / 256 + 1, 256 >> >((cufftReal*)data_b, NX);
if (cudaDeviceSynchronize() != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return c;
}
cudaMemcpy(&c[0], data_b, sizeof(float) * b.size(), cudaMemcpyDeviceToHost);
cufftDestroy(plan_a);
cufftDestroy(plan_b);
cufftDestroy(plan_c);
cudaFree(data_a);
cudaFree(data_b);
return c;
}
__global__ void ComplexPointwiseMulAndScale(cufftComplex *a, cufftComplex *b, int size)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float scale = 1.0f / (float)size;
cufftComplex c;
for (int i = threadID; i < size; i += numThreads)
{
c = cuCmulf(a[i], b[i]);//a*b
b[i] = make_cuFloatComplex(scale*cuCrealf(c), scale*cuCimagf(c));//scale the real and imaginary parts of the complex value
}
}
|
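Editor's note (added, not from either source file): multiply() realises big-number multiplication as a circular convolution via the FFT, and because cuFFT/hipFFT transforms are unnormalized, the pointwise product must be rescaled before the inverse transform — this is the 1/size factor in ComplexPointwiseMulAndScale:

$$c_n \;=\; \sum_{k=0}^{NX-1} a_k\, b_{(n-k)\bmod NX} \;=\; \frac{1}{NX}\,\Bigl[\mathrm{IFFT}\bigl(\mathrm{FFT}(a)\odot\mathrm{FFT}(b)\bigr)\Bigr]_n .$$

Each input is zero-padded with the other operand's length before the call, so NX is at least the digit count of the product and the circular wrap-around cannot corrupt any digit. Note also that an R2C transform of length NX only yields NX/2 + 1 complex coefficients, so only that many pointwise products are actually required.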
aa560e10706bc4ea0251e8c15e6a5c20291d074b.hip | // !!! This is a file automatically generated by hipify!!!
/*
To compile:
nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> /* for clock() and CLOCKS_PER_SEC */
#include "png_util.h"
#include "hip/hip_runtime.h"
// Q2a: add include for CUDA header file here:
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
__device__ int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// Q2c: transform this function into a CUDA kernel
__global__ void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
int n,m;
complex_t c;
double dr = (cmax.r-cmin.r)/(Nre-1);
double di = (cmax.i-cmin.i)/(Nim-1);;
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
n = tIdx + bIdx*bSizex;
m = tIdy + bIdy*bSizey;
if(n >= Nim || m >= Nre) return; // guard threads that fall outside the image
c.r = cmin.r + dr*m;
c.i = cmin.i + di*n;
count[m + n*Nre] = testpoint(c);
}
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
int Nre = atoi(argv[1]);
int Nim = atoi(argv[2]);
int Nthreads = atoi(argv[3]);
// Q2b: set the number of threads per block and the number of blocks here:
float *temp;
hipMalloc(&temp, Nre*Nim*sizeof(float));
int Bx = Nthreads;
int By = Nthreads;
dim3 B(Bx, By, 1);
int Gx = (Nim + Bx - 1) / Bx; // x dimension covers the imaginary axis (n)
int Gy = (Nre + By - 1) / By; // y dimension covers the real axis (m)
dim3 G(Gx, Gy, 1);
// storage for the iteration counts
float *count = (float*) malloc(Nre*Nim*sizeof(float));
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
clock_t start = clock(); //start time in CPU cycles
// compute mandelbrot set
hipLaunchKernelGGL(( mandelbrot) , dim3(G), dim3(B), 0, 0, Nre, Nim, cmin, cmax, temp); // write into the device buffer, not the host array
hipMemcpy(count, temp, Nre*Nim*sizeof(float), hipMemcpyDeviceToHost);
clock_t end = clock(); //start time in CPU cycles
// print elapsed time
printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
// output mandelbrot to png format image
FILE *fp = fopen("mandelbrot.png", "w");
printf("Printing mandelbrot.png...");
write_hot_png(fp, Nre, Nim, count, 0, 80);
printf("done.\n");
free(count);
exit(0);
return 0;
}
| aa560e10706bc4ea0251e8c15e6a5c20291d074b.cu | /*
To compile:
nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> /* for clock() and CLOCKS_PER_SEC */
#include "png_util.h"
#include "cuda.h"
// Q2a: add include for CUDA header file here:
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
__device__ int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// Q2c: transform this function into a CUDA kernel
__global__ void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
int n,m;
complex_t c;
double dr = (cmax.r-cmin.r)/(Nre-1);
double di = (cmax.i-cmin.i)/(Nim-1);;
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
n = tIdx + bIdx*bSizex;
m = tIdy + bIdy*bSizey;
if(n >= Nim || m >= Nre) return; // guard threads that fall outside the image
c.r = cmin.r + dr*m;
c.i = cmin.i + di*n;
count[m + n*Nre] = testpoint(c);
}
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
int Nre = atoi(argv[1]);
int Nim = atoi(argv[2]);
int Nthreads = atoi(argv[3]);
// Q2b: set the number of threads per block and the number of blocks here:
float *temp;
cudaMalloc(&temp, Nre*Nim*sizeof(float));
int Bx = Nthreads;
int By = Nthreads;
dim3 B(Bx, By, 1);
int Gx = (Nim + Bx - 1) / Bx; // x dimension covers the imaginary axis (n)
int Gy = (Nre + By - 1) / By; // y dimension covers the real axis (m)
dim3 G(Gx, Gy, 1);
// storage for the iteration counts
float *count = (float*) malloc(Nre*Nim*sizeof(float));
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
clock_t start = clock(); //start time in CPU cycles
// compute mandelbrot set
mandelbrot <<<G, B>>> (Nre, Nim, cmin, cmax, temp); // write into the device buffer, not the host array
cudaMemcpy(count, temp, Nre*Nim*sizeof(float), cudaMemcpyDeviceToHost);
clock_t end = clock(); //start time in CPU cycles
// print elapsed time
printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
// output mandelbrot to png format image
FILE *fp = fopen("mandelbrot.png", "w");
printf("Printing mandelbrot.png...");
write_hot_png(fp, Nre, Nim, count, 0, 80);
printf("done.\n");
free(count);
exit(0);
return 0;
}
|
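Editor's sketch (not from the original sources): neither variant checks whether the kernel launch succeeded before reading back the image. A minimal check after the launch — CUDA spelling shown; the HIP build would use the corresponding hipError_t calls — could look like:
// Sketch only: surface launch/runtime errors before trusting the copied-back data.
cudaError_t launchErr = cudaGetLastError();      // errors from the launch configuration itself
cudaError_t syncErr   = cudaDeviceSynchronize(); // errors raised while the kernel executed
if (launchErr != cudaSuccess || syncErr != cudaSuccess) {
  fprintf(stderr, "mandelbrot kernel failed: %s\n",
          cudaGetErrorString(launchErr != cudaSuccess ? launchErr : syncErr));
  exit(EXIT_FAILURE);
}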
82bf8d0458db1b3a5b9faa44dd909a8b05c6b355.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "input.h"
#include "knn_functions.h"
#include "check.h"
#include "hipError_t.h"
#include "utility.h"
int main(int argc, char* argv[]) {
//whether or not to save the result to a file
bool saveData = true;
bool checkresult = false;
//device
int deviceIndex = 0;
if((argc -1) != 2){
printf("Errore non sono stati specificati correttamente i file del dataset!\n");
exit(EXIT_FAILURE);
}
if (K > N){
printf("Errore il numero di vicini non può essere superiore al numero di sample!\n");
exit(EXIT_FAILURE);
}
if (K % 2 == 0){
printf("Inserire un numero di vicini dispari!\n");
exit(EXIT_FAILURE);
}
const char * trainFile = argv[1];
const char * testFile = argv[2];
//number of devices present
int count;
HANDLE_ERROR( hipGetDeviceCount( &count ) );
//check that an available device exists
if(deviceIndex < count)
{
HANDLE_ERROR(hipSetDevice(deviceIndex));
}
else
{
printf("Device non disponbile!\n");
exit(EXIT_FAILURE);
}
// properties of the graphics card
int threads;
hipDeviceGetAttribute(&threads, hipDeviceAttributeMaxThreadsPerBlock, deviceIndex);
//hipDeviceProp_t prop;
//HANDLE_ERROR(hipGetDeviceProperties(&prop, deviceIndex));
//printf("M : %d Max threads per block: %d\n",M, prop.maxThreadsPerBlock );
//printf("Max thread dimensions: (%d, %d, %d)\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
//printf("Max grid dimensions: (%d, %d, %d)\n",prop.maxGridSize[0], prop.maxGridSize[1],prop.maxGridSize[2] );
//printf("\n" );
//Check that the per-block thread limit is not exceeded
if (BLOCK_SIZE * BLOCK_SIZE > threads){
printf("Errore, superato massimo numero di thread per blocco!\n");
exit(EXIT_FAILURE);
}
// measure the execution time
hipEvent_t start, stop;//, stopRead, stopSendData, primoStep, secondoStep;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
//HANDLE_ERROR( hipEventCreate( &stopRead ) );
//HANDLE_ERROR( hipEventCreate( &stopSendData ) );
//HANDLE_ERROR( hipEventCreate( &primoStep ) );
//HANDLE_ERROR( hipEventCreate( &secondoStep ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
float * trainingData= (float *) malloc(N* M * sizeof(float));
float * testingData= (float *) malloc(P* M * sizeof(float));
//HANDLE_ERROR( hipHostMalloc( (void**)&trainingData, N*M * sizeof( *trainingData ), hipHostMallocDefault ) );
//HANDLE_ERROR( hipHostMalloc( (void**)&testingData, P*M * sizeof( *testingData ), hipHostMallocDefault ) );
int * classesTraining = (int*) malloc(N *sizeof(int));
int * classesTesting = (int*) malloc(P *sizeof(int));
float * dist = (float *) malloc(P* N * sizeof(float));
//HANDLE_ERROR( hipHostMalloc( (void**)&dist, P*M * sizeof( *dist ), hipHostMallocDefault ) );
if(trainingData == NULL || testingData == NULL || classesTesting == NULL || classesTraining == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
//reading data from file
read_file(trainFile, N, M, trainingData, classesTraining);
read_file(testFile, P, M, testingData, classesTesting);
// get stop time, and display the timing results
//HANDLE_ERROR( hipEventRecord( stopRead, 0 ) );
//HANDLE_ERROR( hipEventSynchronize( stopRead ) );
//float elapsedTimeRead;
//HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, stopRead ) );
//printf( "Lettura dati eseguita in: %f \n", elapsedTimeRead/1000 );
// pointers to the data on the device
float* dev_train;
float* dev_test;
float* dev_dist;
int* dev_label;
size_t pitch, pitchTest, pitchDist, pitchLabel;
HANDLE_ERROR(hipMallocPitch((void**)&dev_train, &pitch, M * sizeof(float), N));
HANDLE_ERROR(hipMemcpy2D(dev_train, pitch, trainingData, M*sizeof(float), M*sizeof(float), N, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMallocPitch((void**)&dev_test, &pitchTest, M * sizeof(float), P));
HANDLE_ERROR(hipMemcpy2D(dev_test, pitchTest, testingData, M*sizeof(float), M*sizeof(float), P, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMallocPitch((void**)&dev_dist, &pitchDist, N * sizeof(float), P));
// allocate memory for the dataset in GPU global memory
//HANDLE_ERROR( hipMemcpy( dev_dist, dist, N * P * sizeof(float), hipMemcpyHostToDevice ) );
//HANDLE_ERROR( hipEventRecord( stopSendData, 0 ) );
//HANDLE_ERROR( hipEventSynchronize( stopSendData ) );
//HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, stopSendData ) );
//printf( "Copia dati su GPU eseguita dopo : %f secondi\n", elapsedTimeRead/1000 );
//HANDLE_ERROR( hipMemcpy( dev_label, label, N * P * sizeof(int), hipMemcpyHostToDevice ) );
// create blocks of BLOCK_SIZE * BLOCK_SIZE threads
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
//Number of blocks
int dim_row = (P +1 % BLOCK_SIZE == 0) ? P / BLOCK_SIZE : P / BLOCK_SIZE + 1;
int dim_col = (N + 1 % BLOCK_SIZE == 0) ? N / BLOCK_SIZE : N / BLOCK_SIZE + 1;
dim3 grid(dim_col, dim_row, 1); // a grid of CUDA thread blocks
//printf("Numero di blocchi %d %d da %d \n", dim_row, dim_col, BLOCK_SIZE);
//hipFuncSetCacheConfig(computeDist_kernel, hipFuncCachePreferL1);
// compute the Euclidean distance between train and test points
hipLaunchKernelGGL(( computeDist_kernel), dim3(grid), dim3(block), 0, 0, dev_train, pitch, dev_test, pitchTest, dev_dist, pitchDist);//, dev_label);
int * label = (int*) malloc(P * K *sizeof(int));
int* countsLabel = (int*) malloc(sizeof(int)* LABELS);
int* confusionMatrix = (int*) malloc(sizeof(int)* LABELS * LABELS);
if(confusionMatrix ==NULL || countsLabel == NULL || label == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// initialize the confusion matrix to zero
initilizeArray(confusionMatrix, LABELS*LABELS, 0);
// barrier to make sure all distances have been computed
hipDeviceSynchronize();
//HANDLE_ERROR( hipEventRecord( primoStep, 0 ) );
//HANDLE_ERROR( hipEventSynchronize( primoStep ) );
//HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, primoStep ) );
//printf( "Distanze calcolate dopo : %f secondi\n", elapsedTimeRead/1000 );
//free the dataset on the device, no longer needed
HANDLE_ERROR( hipFree(dev_train) );
HANDLE_ERROR( hipFree(dev_test) );
//allocate the labels
HANDLE_ERROR(hipMallocPitch((void**)&dev_label, &pitchLabel, K * sizeof(float), P));
dim3 blockSort(BLOCK_SIZE, 1, 1);
dim3 gridSort(dim_row, 1, 1);
//printf("Numero di blocchi per il sort %d da %d \n", dim_row, BLOCK_SIZE);
hipLaunchKernelGGL(( sort_kernel), dim3(gridSort), dim3(blockSort), 0, 0, dev_dist, pitchDist, dev_label, pitchLabel);
// barrier to make sure all rows are sorted
hipDeviceSynchronize();
//retrieve the results from the GPU
//HANDLE_ERROR(hipMemcpy(dist , dev_dist, P * N * sizeof(float), hipMemcpyDeviceToHost ) );
HANDLE_ERROR(hipMemcpy2D(label, K * sizeof(float), dev_label, pitchLabel, K * sizeof(float), P, hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipMemcpy(label , dev_label, P * K * sizeof(int), hipMemcpyDeviceToHost ) );
//HANDLE_ERROR( hipEventRecord( secondoStep, 0 ) );
//HANDLE_ERROR( hipEventSynchronize( secondoStep ) );
//HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, secondoStep ) );
//printf( "Ordinate e ricevute dopo : %f secondi\n", elapsedTimeRead/1000 );
/*printf("Dopoooooooo\n");
for(int i=0; i < P; i++){
for(int j=0; j < K; j++)
printf(" %d ", label[i*K +j]);
printf("\n\n");
}
*/
// number of classification errors made by the KNN algorithm
int error = 0;
//the computation of the final confusion matrix is left to the CPU
for (int i=0; i<P; i++){
initilizeArray(countsLabel, LABELS, 0);
int bestLabel = 0;
for(int j=0; j<K; j++){
int indice = label[i*K+j];
int classe = classesTraining[indice];
countsLabel[classe] = countsLabel[classe] + 1;
if(countsLabel[classe] > countsLabel[bestLabel])
bestLabel = classe;
}
int realLabel = classesTesting[i];
if (realLabel != bestLabel){
error = error + 1;
}
//update confusion matrix
confusionMatrix[realLabel * LABELS + bestLabel] = confusionMatrix[realLabel * LABELS + bestLabel] +1;
}
//print the confusion matrix
//printConfusionMatrix(confusionMatrix);
//printf("Errori totali: %d\n", error);
//printf("Record corretti: %d accuratezza (%.2f%%); ", P - error, 100 - ((float) error / P) * 100);
// check the result against the serial version
if(checkresult == true){
checkResultKNN(trainingData, testingData, classesTraining, classesTesting, confusionMatrix);
}
// free CPU memory
//HANDLE_ERROR( hipHostFree( trainingData) );
//HANDLE_ERROR( hipHostFree( testingData ) );
//HANDLE_ERROR( hipHostFree( dist ) );
free(trainingData); trainingData = NULL;
free(testingData); testingData = NULL;
free(dist); dist=NULL;
free(classesTraining); classesTraining = NULL;
free(classesTesting); classesTesting = NULL;
free(confusionMatrix); confusionMatrix=NULL;
free(label); label=NULL;
free(countsLabel); countsLabel= NULL;
//free GPU memory
//HANDLE_ERROR( hipFree(dev_train) );
//HANDLE_ERROR( hipFree(dev_test) );
HANDLE_ERROR( hipFree(dev_label ) );
HANDLE_ERROR( hipFree(dev_dist ) );
// measure total execution time
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
//printf( "Total time: %f \n", elapsedTime/1000 );
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
//HANDLE_ERROR( hipEventDestroy( stopRead ) );
//save on file
if(saveData == true)
saveResultsOnFile(elapsedTime/1000);
return 0;
}
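Editor's sketch (not part of the original source): HANDLE_ERROR comes from the project's error-handling header, which is not shown in this listing; a typical definition consistent with how it is used above — an assumption, spelled here for the HIP build — is:
// Hypothetical definition; the real one lives in the project's error header.
static void HandleError(hipError_t err, const char *file, int line) {
  if (err != hipSuccess) {
    printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
  }
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))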
| 82bf8d0458db1b3a5b9faa44dd909a8b05c6b355.cu | #include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "input.h"
#include "knn_functions.h"
#include "check.h"
#include "cudaError.h"
#include "utility.h"
int main(int argc, char* argv[]) {
//whether or not to save the result to a file
bool saveData = true;
bool checkresult = false;
//device
int deviceIndex = 0;
if((argc -1) != 2){
printf("Errore non sono stati specificati correttamente i file del dataset!\n");
exit(EXIT_FAILURE);
}
if (K > N){
printf("Errore il numero di vicini non può essere superiore al numero di sample!\n");
exit(EXIT_FAILURE);
}
if (K % 2 == 0){
printf("Inserire un numero di vicini dispari!\n");
exit(EXIT_FAILURE);
}
const char * trainFile = argv[1];
const char * testFile = argv[2];
//number of devices present
int count;
HANDLE_ERROR( cudaGetDeviceCount( &count ) );
//check that an available device exists
if(deviceIndex < count)
{
HANDLE_ERROR(cudaSetDevice(deviceIndex));
}
else
{
printf("Device non disponbile!\n");
exit(EXIT_FAILURE);
}
// properties of the graphics card
int threads;
cudaDeviceGetAttribute(&threads, cudaDevAttrMaxThreadsPerBlock, deviceIndex);
//cudaDeviceProp prop;
//HANDLE_ERROR(cudaGetDeviceProperties(&prop, deviceIndex));
//printf("M : %d Max threads per block: %d\n",M, prop.maxThreadsPerBlock );
//printf("Max thread dimensions: (%d, %d, %d)\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
//printf("Max grid dimensions: (%d, %d, %d)\n",prop.maxGridSize[0], prop.maxGridSize[1],prop.maxGridSize[2] );
//printf("\n" );
//Check that the per-block thread limit is not exceeded
if (BLOCK_SIZE * BLOCK_SIZE > threads){
printf("Errore, superato massimo numero di thread per blocco!\n");
exit(EXIT_FAILURE);
}
// measure the execution time
cudaEvent_t start, stop;//, stopRead, stopSendData, primoStep, secondoStep;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
//HANDLE_ERROR( cudaEventCreate( &stopRead ) );
//HANDLE_ERROR( cudaEventCreate( &stopSendData ) );
//HANDLE_ERROR( cudaEventCreate( &primoStep ) );
//HANDLE_ERROR( cudaEventCreate( &secondoStep ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
float * trainingData= (float *) malloc(N* M * sizeof(float));
float * testingData= (float *) malloc(P* M * sizeof(float));
//HANDLE_ERROR( cudaHostAlloc( (void**)&trainingData, N*M * sizeof( *trainingData ), cudaHostAllocDefault ) );
//HANDLE_ERROR( cudaHostAlloc( (void**)&testingData, P*M * sizeof( *testingData ), cudaHostAllocDefault ) );
int * classesTraining = (int*) malloc(N *sizeof(int));
int * classesTesting = (int*) malloc(P *sizeof(int));
float * dist = (float *) malloc(P* N * sizeof(float));
//HANDLE_ERROR( cudaHostAlloc( (void**)&dist, P*M * sizeof( *dist ), cudaHostAllocDefault ) );
if(trainingData == NULL || testingData == NULL || classesTesting == NULL || classesTraining == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
//reading data from file
read_file(trainFile, N, M, trainingData, classesTraining);
read_file(testFile, P, M, testingData, classesTesting);
// get stop time, and display the timing results
//HANDLE_ERROR( cudaEventRecord( stopRead, 0 ) );
//HANDLE_ERROR( cudaEventSynchronize( stopRead ) );
//float elapsedTimeRead;
//HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, stopRead ) );
//printf( "Lettura dati eseguita in: %f \n", elapsedTimeRead/1000 );
// pointers to the data on the device
float* dev_train;
float* dev_test;
float* dev_dist;
int* dev_label;
size_t pitch, pitchTest, pitchDist, pitchLabel;
HANDLE_ERROR(cudaMallocPitch((void**)&dev_train, &pitch, M * sizeof(float), N));
HANDLE_ERROR(cudaMemcpy2D(dev_train, pitch, trainingData, M*sizeof(float), M*sizeof(float), N, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMallocPitch((void**)&dev_test, &pitchTest, M * sizeof(float), P));
HANDLE_ERROR(cudaMemcpy2D(dev_test, pitchTest, testingData, M*sizeof(float), M*sizeof(float), P, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMallocPitch((void**)&dev_dist, &pitchDist, N * sizeof(float), P));
// allocate memory for the dataset in GPU global memory
//HANDLE_ERROR( cudaMemcpy( dev_dist, dist, N * P * sizeof(float), cudaMemcpyHostToDevice ) );
//HANDLE_ERROR( cudaEventRecord( stopSendData, 0 ) );
//HANDLE_ERROR( cudaEventSynchronize( stopSendData ) );
//HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, stopSendData ) );
//printf( "Copia dati su GPU eseguita dopo : %f secondi\n", elapsedTimeRead/1000 );
//HANDLE_ERROR( cudaMemcpy( dev_label, label, N * P * sizeof(int), cudaMemcpyHostToDevice ) );
// create blocks of BLOCK_SIZE * BLOCK_SIZE threads
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
//Number of blocks
int dim_row = (P +1 % BLOCK_SIZE == 0) ? P / BLOCK_SIZE : P / BLOCK_SIZE + 1;
int dim_col = (N + 1 % BLOCK_SIZE == 0) ? N / BLOCK_SIZE : N / BLOCK_SIZE + 1;
dim3 grid(dim_col, dim_row, 1); // a grid of CUDA thread blocks
//printf("Numero di blocchi %d %d da %d \n", dim_row, dim_col, BLOCK_SIZE);
//cudaFuncSetCacheConfig(computeDist_kernel, cudaFuncCachePreferL1);
// compute the Euclidean distance between train and test points
computeDist_kernel<<<grid, block>>>(dev_train, pitch, dev_test, pitchTest, dev_dist, pitchDist);//, dev_label);
int * label = (int*) malloc(P * K *sizeof(int));
int* countsLabel = (int*) malloc(sizeof(int)* LABELS);
int* confusionMatrix = (int*) malloc(sizeof(int)* LABELS * LABELS);
if(confusionMatrix ==NULL || countsLabel == NULL || label == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// initialize the confusion matrix to zero
initilizeArray(confusionMatrix, LABELS*LABELS, 0);
// barrier to make sure all distances have been computed
cudaDeviceSynchronize();
//HANDLE_ERROR( cudaEventRecord( primoStep, 0 ) );
//HANDLE_ERROR( cudaEventSynchronize( primoStep ) );
//HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, primoStep ) );
//printf( "Distanze calcolate dopo : %f secondi\n", elapsedTimeRead/1000 );
//free the dataset on the device, no longer needed
HANDLE_ERROR( cudaFree(dev_train) );
HANDLE_ERROR( cudaFree(dev_test) );
//allocate the labels
HANDLE_ERROR(cudaMallocPitch((void**)&dev_label, &pitchLabel, K * sizeof(float), P));
dim3 blockSort(BLOCK_SIZE, 1, 1);
dim3 gridSort(dim_row, 1, 1);
//printf("Numero di blocchi per il sort %d da %d \n", dim_row, BLOCK_SIZE);
sort_kernel<<<gridSort, blockSort>>>(dev_dist, pitchDist, dev_label, pitchLabel);
// barrier to make sure all rows are sorted
cudaDeviceSynchronize();
//retrieve the results from the GPU
//HANDLE_ERROR(cudaMemcpy(dist , dev_dist, P * N * sizeof(float), cudaMemcpyDeviceToHost ) );
HANDLE_ERROR(cudaMemcpy2D(label, K * sizeof(float), dev_label, pitchLabel, K * sizeof(float), P, cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaMemcpy(label , dev_label, P * K * sizeof(int), cudaMemcpyDeviceToHost ) );
//HANDLE_ERROR( cudaEventRecord( secondoStep, 0 ) );
//HANDLE_ERROR( cudaEventSynchronize( secondoStep ) );
//HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, secondoStep ) );
//printf( "Ordinate e ricevute dopo : %f secondi\n", elapsedTimeRead/1000 );
/*printf("Dopoooooooo\n");
for(int i=0; i < P; i++){
for(int j=0; j < K; j++)
printf(" %d ", label[i*K +j]);
printf("\n\n");
}
*/
// number of classification errors made by the KNN algorithm
int error = 0;
//the computation of the final confusion matrix is left to the CPU
for (int i=0; i<P; i++){
initilizeArray(countsLabel, LABELS, 0);
int bestLabel = 0;
for(int j=0; j<K; j++){
int indice = label[i*K+j];
int classe = classesTraining[indice];
countsLabel[classe] = countsLabel[classe] + 1;
if(countsLabel[classe] > countsLabel[bestLabel])
bestLabel = classe;
}
int realLabel = classesTesting[i];
if (realLabel != bestLabel){
error = error + 1;
}
//update confusion matrix
confusionMatrix[realLabel * LABELS + bestLabel] = confusionMatrix[realLabel * LABELS + bestLabel] +1;
}
//print the confusion matrix
//printConfusionMatrix(confusionMatrix);
//printf("Errori totali: %d\n", error);
//printf("Record corretti: %d accuratezza (%.2f%%); ", P - error, 100 - ((float) error / P) * 100);
// check the result against the serial version
if(checkresult == true){
checkResultKNN(trainingData, testingData, classesTraining, classesTesting, confusionMatrix);
}
// free CPU memory
//HANDLE_ERROR( cudaFreeHost( trainingData) );
//HANDLE_ERROR( cudaFreeHost( testingData ) );
//HANDLE_ERROR( cudaFreeHost( dist ) );
free(trainingData); trainingData = NULL;
free(testingData); testingData = NULL;
free(dist); dist=NULL;
free(classesTraining); classesTraining = NULL;
free(classesTesting); classesTesting = NULL;
free(confusionMatrix); confusionMatrix=NULL;
free(label); label=NULL;
free(countsLabel); countsLabel= NULL;
//free GPU memory
//HANDLE_ERROR( cudaFree(dev_train) );
//HANDLE_ERROR( cudaFree(dev_test) );
HANDLE_ERROR( cudaFree(dev_label ) );
HANDLE_ERROR( cudaFree(dev_dist ) );
// measure total execution time
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
//printf( "Total time: %f \n", elapsedTime/1000 );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
//HANDLE_ERROR( cudaEventDestroy( stopRead ) );
//save on file
if(saveData == true)
saveResultsOnFile(elapsedTime/1000);
return 0;
}
|
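Editor's sketch (not part of the original sources): computeDist_kernel and sort_kernel are declared in knn_functions.h and are not shown in this listing. For illustration, an unoptimized distance kernel consistent with the launch configuration and the pitched buffers used in main() might look like the following; only the names N, M and P are taken from the surrounding code, the body itself is an assumption.
// Hypothetical sketch, not the project's actual computeDist_kernel.
__global__ void computeDist_sketch(const float* train, size_t pitchTrain,
                                   const float* test, size_t pitchTest,
                                   float* dist, size_t pitchDist)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x; // training sample index, 0..N-1
    int row = blockIdx.y * blockDim.y + threadIdx.y; // test sample index, 0..P-1
    if (col >= N || row >= P) return;
    const float* trainRow = (const float*)((const char*)train + (size_t)col * pitchTrain);
    const float* testRow  = (const float*)((const char*)test  + (size_t)row * pitchTest);
    float acc = 0.0f;
    for (int f = 0; f < M; ++f) {        // M features per sample
        float d = trainRow[f] - testRow[f];
        acc += d * d;
    }
    float* distRow = (float*)((char*)dist + (size_t)row * pitchDist); // pitch is in bytes
    distRow[col] = sqrtf(acc);           // Euclidean distance
}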
3fef76655c1af92ecf73f58664c2d8fddfd8f44b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <mex.h>
#include "approxPoseEsti.h"
#include "coarseToFinePoseEsti.h"
#include "apePreCal.h"
#include "apeCommon.h"
namespace ape {
void approxPoseEsti(const cv::Mat &tmp,
const cv::Mat &img,
float fx,
float fy,
float cx,
float cy,
float min_dim,
float min_tz,
float max_tz,
float epsilon,
int prm_lvls,
bool photo_inva,
bool verbose,
double *ex_mat) {
if (cv::cuda::getCudaEnabledDeviceCount() > 0) {
cv::cuda::setDevice(0);
cv::cuda::resetDevice();
Timer time;
long long t1;
if (verbose) {
time.Reset();
time.Start();
}
ApeParams ape_params;
// allocate
cv::cuda::GpuMat tmp_d(tmp.rows, tmp.cols, CV_32FC3);
cv::cuda::GpuMat img_d(img.rows, img.cols, CV_32FC4);
// pre-calculation
preCal(tmp, img, fx, fy, cx, cy, min_dim, min_tz, max_tz, epsilon, verbose, &tmp_d, &img_d, &ape_params);
if (verbose) {
time.Pause();
t1 = time.GetCount();
time.Reset();
time.Start();
}
// coarse-to-fine pose estimation
coarseToFinePoseEstimation(tmp_d, img_d, prm_lvls, photo_inva, verbose, &ape_params, ex_mat);
if (verbose) {
time.Pause();
mexPrintf("[*** Approximation Pose Estimation ***] Runtime: %f seconds\n", (t1+time.GetCount()) / 1e6);
mexEvalString("drawnow;");
}
}
}
} // namespace ape
| 3fef76655c1af92ecf73f58664c2d8fddfd8f44b.cu | #include <iostream>
#include <cuda.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <mex.h>
#include "approxPoseEsti.h"
#include "coarseToFinePoseEsti.h"
#include "apePreCal.h"
#include "apeCommon.h"
namespace ape {
void approxPoseEsti(const cv::Mat &tmp,
const cv::Mat &img,
float fx,
float fy,
float cx,
float cy,
float min_dim,
float min_tz,
float max_tz,
float epsilon,
int prm_lvls,
bool photo_inva,
bool verbose,
double *ex_mat) {
if (cv::cuda::getCudaEnabledDeviceCount() > 0) {
cv::cuda::setDevice(0);
cv::cuda::resetDevice();
Timer time;
long long t1;
if (verbose) {
time.Reset();
time.Start();
}
ApeParams ape_params;
// allocate
cv::cuda::GpuMat tmp_d(tmp.rows, tmp.cols, CV_32FC3);
cv::cuda::GpuMat img_d(img.rows, img.cols, CV_32FC4);
// pre-calculation
preCal(tmp, img, fx, fy, cx, cy, min_dim, min_tz, max_tz, epsilon, verbose, &tmp_d, &img_d, &ape_params);
if (verbose) {
time.Pause();
t1 = time.GetCount();
time.Reset();
time.Start();
}
// coarse-to-fine pose estimation
coarseToFinePoseEstimation(tmp_d, img_d, prm_lvls, photo_inva, verbose, &ape_params, ex_mat);
if (verbose) {
time.Pause();
mexPrintf("[*** Approximation Pose Estimation ***] Runtime: %f seconds\n", (t1+time.GetCount()) / 1e6);
mexEvalString("drawnow;");
}
}
}
} // namespace ape
|
74cb4ad595b113f7529be4c5df0ddad2dd538e67.hip | // !!! This is a file automatically generated by hipify!!!
/*
* (C) Copyright 1996-2016 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include <hip/hip_runtime.h>
#include "atlas/library/config.h"
#include "tests/AtlasTestEnvironment.h"
#include "atlas/array/SVector.h"
using namespace atlas::array;
namespace atlas {
namespace test {
__global__
void kernel_exe(int* list_ints_ptr, size_t size, int offset, bool* result )
{
SVector<int> list_ints(list_ints_ptr, size);
*result = *result && (list_ints[offset] == 3);
*result = *result && (list_ints[offset+1] == 4);
list_ints[offset]++;
list_ints[offset+1]++;
}
CASE( "test_svector" )
{
SVector<int> list_ints(2);
list_ints[0] = 3;
list_ints[1] = 4;
EXPECT( list_ints[0] == 3 );
EXPECT( list_ints[1] == 4 );
EXPECT( list_ints.size() == 2);
bool *result;
hipError_t err = hipMallocManaged(&result, sizeof(bool));
if(err != hipSuccess)
throw_AssertionFailed("failed to allocate GPU memory");
*result=true;
hipLaunchKernelGGL(( kernel_exe), dim3(1),dim3(1), 0, 0, list_ints.data(), list_ints.size(), 0, result);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
throw_AssertionFailed("failed to execute kernel");
EXPECT( *result );
EXPECT( list_ints[0] == 4);
EXPECT( list_ints[1] == 5);
}
CASE( "test_svector_resize" )
{
SVector<int> list_ints(2);
list_ints[0] = 3;
list_ints[1] = 4;
EXPECT( list_ints[0] == 3 );
EXPECT( list_ints[1] == 4 );
EXPECT( list_ints.size() == 2);
list_ints.resize(5);
EXPECT( list_ints.size() == 5);
bool *result;
hipError_t err = hipMallocManaged(&result, sizeof(bool));
if(err != hipSuccess)
throw_AssertionFailed("failed to allocate GPU memory");
*result=true;
list_ints[3] = 3;
list_ints[4] = 4;
hipLaunchKernelGGL(( kernel_exe), dim3(1),dim3(1), 0, 0, list_ints.data(), list_ints.size(), 3, result);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
throw_AssertionFailed("failed to execute kernel");
EXPECT( *result );
EXPECT( list_ints[3] == 4);
EXPECT( list_ints[4] == 5);
}
}
}
int main(int argc, char **argv) {
return atlas::test::run( argc, argv );
}
| 74cb4ad595b113f7529be4c5df0ddad2dd538e67.cu | /*
* (C) Copyright 1996-2016 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include <cuda_runtime.h>
#include "atlas/library/config.h"
#include "tests/AtlasTestEnvironment.h"
#include "atlas/array/SVector.h"
using namespace atlas::array;
namespace atlas {
namespace test {
__global__
void kernel_exe(int* list_ints_ptr, size_t size, int offset, bool* result )
{
SVector<int> list_ints(list_ints_ptr, size);
*result = *result && (list_ints[offset] == 3);
*result = *result && (list_ints[offset+1] == 4);
list_ints[offset]++;
list_ints[offset+1]++;
}
CASE( "test_svector" )
{
SVector<int> list_ints(2);
list_ints[0] = 3;
list_ints[1] = 4;
EXPECT( list_ints[0] == 3 );
EXPECT( list_ints[1] == 4 );
EXPECT( list_ints.size() == 2);
bool *result;
cudaError_t err = cudaMallocManaged(&result, sizeof(bool));
if(err != cudaSuccess)
throw_AssertionFailed("failed to allocate GPU memory");
*result=true;
kernel_exe<<<1,1>>>(list_ints.data(), list_ints.size(), 0, result);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
throw_AssertionFailed("failed to execute kernel");
EXPECT( *result );
EXPECT( list_ints[0] == 4);
EXPECT( list_ints[1] == 5);
}
CASE( "test_svector_resize" )
{
SVector<int> list_ints(2);
list_ints[0] = 3;
list_ints[1] = 4;
EXPECT( list_ints[0] == 3 );
EXPECT( list_ints[1] == 4 );
EXPECT( list_ints.size() == 2);
list_ints.resize(5);
EXPECT( list_ints.size() == 5);
bool *result;
cudaError_t err = cudaMallocManaged(&result, sizeof(bool));
if(err != cudaSuccess)
throw_AssertionFailed("failed to allocate GPU memory");
*result=true;
list_ints[3] = 3;
list_ints[4] = 4;
kernel_exe<<<1,1>>>(list_ints.data(), list_ints.size(), 3, result);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
throw_AssertionFailed("failed to execute kernel");
EXPECT( *result );
EXPECT( list_ints[3] == 4);
EXPECT( list_ints[4] == 5);
}
}
}
int main(int argc, char **argv) {
return atlas::test::run( argc, argv );
}
|
201f280ba528e0f963183ba88f2c8497baede01f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernCalcMu( const size_t numPoints, const size_t pointDim, const double* X, const double* loggamma, const double* GammaK, double* dest ) {
// Assumes a 2D grid of 1024x1 1D blocks
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
if(i >= numPoints) {
return;
}
const double a = exp(loggamma[i]) / exp(*GammaK);
const double* x = & X[i * pointDim];
double* y = & dest[i * pointDim];
for(size_t j = 0; j < pointDim; ++j) { // loop variable renamed so it does not shadow the thread index i
y[j] = a * x[j];
}
} | 201f280ba528e0f963183ba88f2c8497baede01f.cu | #include "includes.h"
__global__ void kernCalcMu( const size_t numPoints, const size_t pointDim, const double* X, const double* loggamma, const double* GammaK, double* dest ) {
// Assumes a 2D grid of 1024x1 1D blocks
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
if(i >= numPoints) {
return;
}
const double a = exp(loggamma[i]) / exp(*GammaK);
const double* x = & X[i * pointDim];
double* y = & dest[i * pointDim];
for(size_t i = 0; i < pointDim; ++i) {
y[i] = a * x[i];
}
} |
48c656b4bbab3ef61c698725703c9fc0ceb1e9a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "normalize_delta_kernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float)); // allocate N floats, not N bytes
float *mean = NULL;
hipMalloc(&mean, XSIZE*YSIZE*sizeof(float));
float *variance = NULL;
hipMalloc(&variance, XSIZE*YSIZE*sizeof(float));
float *mean_delta = NULL;
hipMalloc(&mean_delta, XSIZE*YSIZE*sizeof(float));
float *variance_delta = NULL;
hipMalloc(&variance_delta, XSIZE*YSIZE*sizeof(float));
int batch = 2;
int filters = 2;
int spatial = 2;
float *delta = NULL;
hipMalloc(&delta, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
normalize_delta_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,mean,variance,mean_delta,variance_delta,batch,filters,spatial,delta);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
normalize_delta_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,mean,variance,mean_delta,variance_delta,batch,filters,spatial,delta);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
normalize_delta_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,mean,variance,mean_delta,variance_delta,batch,filters,spatial,delta);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 48c656b4bbab3ef61c698725703c9fc0ceb1e9a3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "normalize_delta_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float)); // allocate N floats, not N bytes
float *mean = NULL;
cudaMalloc(&mean, XSIZE*YSIZE*sizeof(float));
float *variance = NULL;
cudaMalloc(&variance, XSIZE*YSIZE*sizeof(float));
float *mean_delta = NULL;
cudaMalloc(&mean_delta, XSIZE*YSIZE*sizeof(float));
float *variance_delta = NULL;
cudaMalloc(&variance_delta, XSIZE*YSIZE*sizeof(float));
int batch = 2;
int filters = 2;
int spatial = 2;
float *delta = NULL;
cudaMalloc(&delta, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
normalize_delta_kernel<<<gridBlock,threadBlock>>>(N,x,mean,variance,mean_delta,variance_delta,batch,filters,spatial,delta);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
normalize_delta_kernel<<<gridBlock,threadBlock>>>(N,x,mean,variance,mean_delta,variance_delta,batch,filters,spatial,delta);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
normalize_delta_kernel<<<gridBlock,threadBlock>>>(N,x,mean,variance,mean_delta,variance_delta,batch,filters,spatial,delta);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
92d280dffee2a401665a410655e65ec5f2fd97f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// marks a function that will run on the device
__global__ void hello()
{
printf("Oi mundo! De thread: %d De: bloco %d\n", threadIdx.x, blockIdx.x);
}
int main(void)
{
int num_threads = 5;
int num_blocks = 5;
//call the kernel, specifying blocks and threads
hipLaunchKernelGGL(( hello), dim3(num_blocks),dim3(num_threads), 0, 0, );
//wait for the GPU code to finish
hipDeviceSynchronize();
return 0;
}
| 92d280dffee2a401665a410655e65ec5f2fd97f5.cu | #include <stdio.h>
// marks a function that will run on the device
__global__ void hello()
{
printf("Oi mundo! De thread: %d De: bloco %d\n", threadIdx.x, blockIdx.x);
}
int main(void)
{
int num_threads = 5;
int num_blocks = 5;
//call the kernel, specifying blocks and threads
hello<<<num_blocks,num_threads>>>();
//wait for the GPU code to finish
cudaDeviceSynchronize();
return 0;
}
|
ed694a0e69b8c0ac58dca4dd8a0c72fe515f60b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "multilogit.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float tmpScore = 0;
float classApprox[ElementsPerThread];
ui16 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
ui32 loadApproxIndex[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadApproxIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui16>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize)) : 0;
}
const float tmp = targetClass[j] < effectiveClassCount && idx < size ? __ldg(predictions + loadApproxIndex[j] + targetClass[j] * predictionsAlignSize) : 0.0f;
classApprox[j] = tmp - maxApprox[j];
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) : 0.0f;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (der && idx < size) {
for (int k = 0; k < effectiveClassCount; ++k) {
const float pk = __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der[idx + k * derAlignSize] = weight[j] * ((targetClass[j] == k ? 1.0f : 0.0f) - pk);
}
}
if (functionValue) {
const float logDenum = __logf(sumExpApproxForAllClasses[j]);
tmpScore += (idx < size) ? weight[j] * (classApprox[j] - logDenum) : 0;
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitSecondDerRowImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + idx + k * predictionsAlignSize)) : 0;
}
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) : 0;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx < size) {
float pRow = 0;
if (der2Row < effectiveClassCount) {
pRow = __expf(__ldg(predictions + idx + der2Row * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
} else {
pRow = __expf(-maxApprox[j]) / sumExpApproxForAllClasses[j];
}
for (int k = 0; k < der2Row; ++k) {
const float pk = __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der2[idx + k * der2AlignSize] = -weight[j] * pk * pRow;
}
der2[idx + der2Row * der2AlignSize] = weight[j] * (1.0 - pRow) * pRow;
}
}
}
void MultiLogitValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 1;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( MultiLogitValAndFirstDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiLogitSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 1;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( MultiLogitSecondDerRowImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
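/*
 * Editor's note, added for clarity (not part of the original source): the kernels
 * above implement the multiclass logit likelihood with the last class's logit
 * pinned to zero, which is why only numClasses - 1 approx columns are stored.
 * Writing a_k for the approxes (a_{K-1} = 0), w for the weight and t for the
 * target class:
 *
 *   p_k             = exp(a_k) / sum_j exp(a_j)
 *   L               = w * ( a_t - log( sum_j exp(a_j) ) )
 *   dL/da_k         = w * ( [t == k] - p_k )
 *   -d2L/da_j da_k  = w * ( p_k * [j == k] - p_j * p_k )
 *
 * The per-row maximum is subtracted before every exponentiation purely for
 * numerical stability; it cancels in each of the ratios above.
 */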
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void RMSEWithUncertaintyValAndFirstDerImpl(
const float* target, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx >= size) {
continue;
}
const ui32 loadApproxIndex = loadPredictionsIndices ? __ldg(loadPredictionsIndices + idx) : idx;
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float approx0 = __ldg(predictions + loadApproxIndex);
const float approx1 = __ldg(predictions + loadApproxIndex + predictionsAlignSize);
const float direction = __ldg(target + idx) - approx0;
const float expApprox1 = __expf(-2 * approx1);
if (der) { // -gradient
der[idx] = weight * direction;
der[idx + derAlignSize] = weight * (direction * direction * expApprox1 - 1);
}
if (functionValue) {
// np.log(2 * np.pi) / 2.0 = 0.9189385332046
tmpScore += -weight * (0.9189385332046 + approx1 + 0.5 * expApprox1 * direction * direction);
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void RMSEWithUncertaintySecondDerRowImpl(
const float* target, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
if (der2Row == 0) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const ui32 idx = tid + j * BlockSize;
if (idx < size) {
der2[idx] = weights ? __ldg(weights + idx) : 1.0f;
}
}
} else if (der2Row == 1) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const ui32 idx = tid + j * BlockSize;
if (idx < size) {
const float approx0 = __ldg(predictions + idx);
const float approx1 = __ldg(predictions + idx + predictionsAlignSize);
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float miss = __ldg(target + idx) - approx0;
const float expApprox1 = __expf(-2 * approx1);
der2[idx] = 0.0f;
der2[idx + der2AlignSize] = 2 * weight * miss * miss * expApprox1;
}
}
} else {
// unreachable
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx < size) {
for (int k = 0; k < der2Row; ++k) {
der2[idx + k * der2AlignSize] = 0.0;
}
}
}
}
}
void RMSEWithUncertaintyValueAndDer(
const float* target,
const float* weights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( RMSEWithUncertaintyValAndFirstDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, target, size, weights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void RMSEWithUncertaintySecondDer(
const float* target,
const float* weights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( RMSEWithUncertaintySecondDerRowImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, target, size, weights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
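// MultiCrossEntropy (multi-label logloss): every target dimension is an independent sigmoid cross-entropy.
// der gets the minus gradient w * (t - sigmoid(approx)) per dimension; functionValue accumulates the
// weighted log-likelihood averaged over targetCount via a block reduction and atomicAdd.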
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiCrossEntropyValueAndDerImpl(
ui32 targetCount,
ui32 size,
const float* targets, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float sumDimErrors = 0;
for (int dim = 0; dim < targetCount; ++dim) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx >= size) {
continue;
}
const ui32 loadApproxIndex = loadPredictionsIndices ? __ldg(loadPredictionsIndices + idx) : idx;
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float target = __ldg(targets + idx + dim * targetAlignSize);
const float approx = __ldg(predictions + loadApproxIndex + dim * predictionsAlignSize);
const float expApprox = __expf(approx);
if (functionValue) {
sumDimErrors += -(isfinite(expApprox) ? __logf(1.0f + expApprox) : approx) * weight;
sumDimErrors += (target * approx) * weight;
}
if (der) { // -gradient
const float sigmoid = isfinite(expApprox) ? expApprox / (1.0f + expApprox) : 1.0f;
der[idx + dim * derAlignSize] = (-sigmoid + target) * weight;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = sumDimErrors / targetCount;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
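// Writes row der2Row of the second derivatives for MultiCrossEntropy: the first der2Row entries are
// zeroed and the diagonal entry gets w * sigmoid(approx) * (1 - sigmoid(approx)) for that dimension.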
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiCrossEntropySecondDerImpl(
ui32 targetCount,
ui32 size,
const float* targets, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const ui32 idx = tid + j * BlockSize;
if (idx < size) {
for (int k = 0; k < der2Row; ++k) {
der2[idx + k * der2AlignSize] = 0.0f;
}
const float approx = __ldg(predictions + idx + der2Row * predictionsAlignSize);
const float expApprox = __expf(approx);
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float target = __ldg(targets + idx + der2Row * targetAlignSize);
const float negSigmoid = isfinite(expApprox) ? -expApprox / (1.0f + expApprox) : -1.0f;
der2[idx + der2Row * der2AlignSize] = -negSigmoid * (1.0f + negSigmoid) * weight;
}
}
}
void MultiCrossEntropyValueAndDer(
ui32 targetCount,
ui32 size,
const float* target, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( MultiCrossEntropyValueAndDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream,
targetCount,
size,
target, targetAlignSize,
weights,
predictions, predictionsAlignSize,
loadPredictionsIndices,
functionValue,
der, derAlignSize);
}
}
void MultiCrossEntropySecondDer(
ui32 targetCount,
ui32 size,
const float* target, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2Row, ui32 der2AlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( MultiCrossEntropySecondDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream,
targetCount,
size,
target, targetAlignSize,
weights,
predictions, predictionsAlignSize,
der2,
der2Row, der2AlignSize);
}
}
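// One-vs-all multiclass: every class gets an independent sigmoid. der holds w * (indicator(class == target) - p)
// per class; functionValue accumulates the weighted per-class log-likelihood divided by numClasses.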
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
ui16 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
ui32 loadPredictionIndex[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadPredictionIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui16>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + loadPredictionIndex[j] + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
const float direction = c - p;
if (der && idx < size) {
der[idx + clazz * derAlignSize] = weight[j] * direction;
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val;
tmpScore += (idx < size) ? weight[j] * (c * val - logExpValPlusOne) / numClasses : 0;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
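// Diagonal second derivative of the one-vs-all objective: der2 = w * p * (1 - p) for every class.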
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllSecondDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float weight[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + idx + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
if (der2 && idx < size) {
der2[idx + clazz * der2AlignSize] = weight[j] * p * (1.0f - p);
}
}
}
}
void MultiClassOneVsAllValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 1;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
CB_ENSURE(numClasses <= 65536);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( MultiClassOneVsAllValAndFirstDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiClassOneVsAllSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
CB_ENSURE(numClasses <= 65536);
if (numBlocks) {
hipLaunchKernelGGL(( MultiClassOneVsAllSecondDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2AlignSize, der2);
}
}
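// Maps every document to a confusion-matrix cell: bins[i] = predictedClass * numClasses + targetClass.
// In the binary case the target is thresholded and the single prediction is compared against zero.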
__global__ void BuildConfusionMatrixBinsImpl(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, ui32 predictionsDim,
ui64 predictionsAlignSize,
bool isBinClass,
float binTargetProbabilityThreshold,
ui32* bins) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
ui32 targetClass;
float bestApprox = NegativeInfty();
int bestClass = -1;
predictions += i;
if (isBinClass) {
targetClass = __ldg(targetClasses + i) > binTargetProbabilityThreshold;
bestClass = __ldg(predictions) > 0;
} else {
targetClass = static_cast<ui16>(__ldg(targetClasses + i));
for (int clazz = 0; clazz < numClasses; ++clazz) {
const float approx = clazz < predictionsDim ? __ldg(predictions + clazz * predictionsAlignSize) : 0.0f;
if (approx > bestApprox) {
bestApprox = approx;
bestClass = clazz;
}
}
}
bins[i] = bestClass * numClasses + targetClass;
}
}
void BuildConfusionMatrixBins(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, int predictionsDim, ui32 predictionsAlignSize,
bool isBinClass,
float binTargetProbabilityThreshold,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
CB_ENSURE(numClasses < 65536);
if (numBlocks) {
hipLaunchKernelGGL(( BuildConfusionMatrixBinsImpl ), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, predictions, predictionsDim, predictionsAlignSize, isBinClass, binTargetProbabilityThreshold, bins);
}
}
}
| ed694a0e69b8c0ac58dca4dd8a0c72fe515f60b8.cu | #include "multilogit.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
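// MultiLogit (softmax) objective. The last class is implicit with a zero approx, so only numClasses - 1
// prediction rows are read. The kernel below writes the minus gradient w * (indicator(class == k) - p_k)
// per effective class and accumulates the weighted log-likelihood into functionValue with a block
// reduction followed by atomicAdd.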
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float tmpScore = 0;
float classApprox[ElementsPerThread];
ui16 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
ui32 loadApproxIndex[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadApproxIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui16>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize)) : 0;
}
const float tmp = targetClass[j] < effectiveClassCount && idx < size ? __ldg(predictions + loadApproxIndex[j] + targetClass[j] * predictionsAlignSize) : 0.0f;
classApprox[j] = tmp - maxApprox[j];
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) : 0.0f;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (der && idx < size) {
for (int k = 0; k < effectiveClassCount; ++k) {
const float pk = __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der[idx + k * derAlignSize] = weight[j] * ((targetClass[j] == k ? 1.0f : 0.0f) - pk);
}
}
if (functionValue) {
const float logDenum = __logf(sumExpApproxForAllClasses[j]);
tmpScore += (idx < size) ? weight[j] * (classApprox[j] - logDenum) : 0;
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
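// Writes row der2Row of the per-document second derivatives of the MultiLogit objective:
// off-diagonal entries are -w * p_k * p_row, the diagonal entry is w * p_row * (1 - p_row).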
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitSecondDerRowImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + idx + k * predictionsAlignSize)) : 0;
}
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) : 0;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx < size) {
float pRow = 0;
if (der2Row < effectiveClassCount) {
pRow = __expf(__ldg(predictions + idx + der2Row * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
} else {
pRow = __expf(-maxApprox[j]) / sumExpApproxForAllClasses[j];
}
for (int k = 0; k < der2Row; ++k) {
const float pk = __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der2[idx + k * der2AlignSize] = -weight[j] * pk * pRow;
}
der2[idx + der2Row * der2AlignSize] = weight[j] * (1.0 - pRow) * pRow;
}
}
}
void MultiLogitValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 1;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
MultiLogitValAndFirstDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiLogitSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 1;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
MultiLogitSecondDerRowImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
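// RMSEWithUncertainty: predictions row 0 is the predicted mean, row 1 is log(sigma). The kernel below
// writes the minus gradient and accumulates the weighted Gaussian log-likelihood (0.9189... = ln(2*pi)/2).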
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void RMSEWithUncertaintyValAndFirstDerImpl(
const float* target, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx >= size) {
continue;
}
const ui32 loadApproxIndex = loadPredictionsIndices ? __ldg(loadPredictionsIndices + idx) : idx;
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float approx0 = __ldg(predictions + loadApproxIndex);
const float approx1 = __ldg(predictions + loadApproxIndex + predictionsAlignSize);
const float direction = __ldg(target + idx) - approx0;
const float expApprox1 = __expf(-2 * approx1);
if (der) { // -gradient
der[idx] = weight * direction;
der[idx + derAlignSize] = weight * (direction * direction * expApprox1 - 1);
}
if (functionValue) {
// np.log(2 * np.pi) / 2.0 = 0.9189385332046
tmpScore += -weight * (0.9189385332046 + approx1 + 0.5 * expApprox1 * direction * direction);
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void RMSEWithUncertaintySecondDerRowImpl(
const float* target, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
if (der2Row == 0) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const ui32 idx = tid + j * BlockSize;
if (idx < size) {
der2[idx] = weights ? __ldg(weights + idx) : 1.0f;
}
}
} else if (der2Row == 1) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const ui32 idx = tid + j * BlockSize;
if (idx < size) {
const float approx0 = __ldg(predictions + idx);
const float approx1 = __ldg(predictions + idx + predictionsAlignSize);
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float miss = __ldg(target + idx) - approx0;
const float expApprox1 = __expf(-2 * approx1);
der2[idx] = 0.0f;
der2[idx + der2AlignSize] = 2 * weight * miss * miss * expApprox1;
}
}
} else {
// unreachable
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx < size) {
for (int k = 0; k < der2Row; ++k) {
der2[idx + k * der2AlignSize] = 0.0;
}
}
}
}
}
void RMSEWithUncertaintyValueAndDer(
const float* target,
const float* weights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
RMSEWithUncertaintyValAndFirstDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(target, size, weights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void RMSEWithUncertaintySecondDer(
const float* target,
const float* weights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
RMSEWithUncertaintySecondDerRowImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(target, size, weights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiCrossEntropyValueAndDerImpl(
ui32 targetCount,
ui32 size,
const float* targets, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float sumDimErrors = 0;
for (int dim = 0; dim < targetCount; ++dim) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx >= size) {
continue;
}
const ui32 loadApproxIndex = loadPredictionsIndices ? __ldg(loadPredictionsIndices + idx) : idx;
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float target = __ldg(targets + idx + dim * targetAlignSize);
const float approx = __ldg(predictions + loadApproxIndex + dim * predictionsAlignSize);
const float expApprox = __expf(approx);
if (functionValue) {
sumDimErrors += -(isfinite(expApprox) ? __logf(1.0f + expApprox) : approx) * weight;
sumDimErrors += (target * approx) * weight;
}
if (der) { // -gradient
const float sigmoid = isfinite(expApprox) ? expApprox / (1.0f + expApprox) : 1.0f;
der[idx + dim * derAlignSize] = (-sigmoid + target) * weight;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = sumDimErrors / targetCount;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiCrossEntropySecondDerImpl(
ui32 targetCount,
ui32 size,
const float* targets, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize
) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const ui32 idx = tid + j * BlockSize;
if (idx < size) {
for (int k = 0; k < der2Row; ++k) {
der2[idx + k * der2AlignSize] = 0.0f;
}
const float approx = __ldg(predictions + idx + der2Row * predictionsAlignSize);
const float expApprox = __expf(approx);
const float weight = weights ? __ldg(weights + idx) : 1.0f;
const float target = __ldg(targets + idx + der2Row * targetAlignSize);
const float negSigmoid = isfinite(expApprox) ? -expApprox / (1.0f + expApprox) : -1.0f;
der2[idx + der2Row * der2AlignSize] = -negSigmoid * (1.0f + negSigmoid) * weight;
}
}
}
void MultiCrossEntropyValueAndDer(
ui32 targetCount,
ui32 size,
const float* target, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
MultiCrossEntropyValueAndDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(
targetCount,
size,
target, targetAlignSize,
weights,
predictions, predictionsAlignSize,
loadPredictionsIndices,
functionValue,
der, derAlignSize);
}
}
void MultiCrossEntropySecondDer(
ui32 targetCount,
ui32 size,
const float* target, ui32 targetAlignSize,
const float* weights,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2Row, ui32 der2AlignSize,
TCudaStream stream
) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 4;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
MultiCrossEntropySecondDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(
targetCount,
size,
target, targetAlignSize,
weights,
predictions, predictionsAlignSize,
der2,
der2Row, der2AlignSize);
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
ui16 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
ui32 loadPredictionIndex[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadPredictionIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui16>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + loadPredictionIndex[j] + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
const float direction = c - p;
if (der && idx < size) {
der[idx + clazz * derAlignSize] = weight[j] * direction;
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val;
tmpScore += (idx < size) ? weight[j] * (c * val - logExpValPlusOne) / numClasses : 0;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllSecondDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float weight[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + idx + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
if (der2 && idx < size) {
der2[idx + clazz * der2AlignSize] = weight[j] * p * (1.0f - p);
}
}
}
}
void MultiClassOneVsAllValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 1;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
CB_ENSURE(numClasses <= 65536);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
MultiClassOneVsAllValAndFirstDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiClassOneVsAllSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
CB_ENSURE(numClasses <= 65536);
if (numBlocks) {
MultiClassOneVsAllSecondDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2AlignSize, der2);
}
}
__global__ void BuildConfusionMatrixBinsImpl(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, ui32 predictionsDim,
ui64 predictionsAlignSize,
bool isBinClass,
float binTargetProbabilityThreshold,
ui32* bins) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
ui32 targetClass;
float bestApprox = NegativeInfty();
int bestClass = -1;
predictions += i;
if (isBinClass) {
targetClass = __ldg(targetClasses + i) > binTargetProbabilityThreshold;
bestClass = __ldg(predictions) > 0;
} else {
targetClass = static_cast<ui16>(__ldg(targetClasses + i));
for (int clazz = 0; clazz < numClasses; ++clazz) {
const float approx = clazz < predictionsDim ? __ldg(predictions + clazz * predictionsAlignSize) : 0.0f;
if (approx > bestApprox) {
bestApprox = approx;
bestClass = clazz;
}
}
}
bins[i] = bestClass * numClasses + targetClass;
}
}
void BuildConfusionMatrixBins(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, int predictionsDim, ui32 predictionsAlignSize,
bool isBinClass,
float binTargetProbabilityThreshold,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
CB_ENSURE(numClasses < 65536);
if (numBlocks) {
BuildConfusionMatrixBinsImpl << < numBlocks, blockSize, 0, stream >> >(targetClasses, numClasses, size, predictions, predictionsDim, predictionsAlignSize, isBinClass, binTargetProbabilityThreshold, bins);
}
}
}
|
218fe79382e65d2708a24c32ae02dad0a9acd52c.hip | // !!! This is a file automatically generated by hipify!!!
//=============================================================================
// FILE: nccl.cu
//
// DESC: This file implements the multi-gpu communication functionality
//
// NOTES: Uses the 'Nickel' NCCL library located at: https://github.com/NVIDIA/nccl
//=============================================================================
#include "util.h"
#include "nccl.h"
#include "memory.h"
#include "..\_nccl\nccl.h"
#include <nvapi.h>
#include <nvml.h> // hipify maps only part of the NVML API to ROCm SMI; the calls below still use NVML, so the NVML header is kept for a consistent build
//=============================================================================
// Function Definitions
//=============================================================================
typedef ncclResult_t (*LPNCCLCOMMINITRANK)(ncclComm_t* comm, int ndev, ncclUniqueId commId, int rank);
typedef ncclResult_t (*LPNCCLCOMMINITALL)(ncclComm_t* comm, int ndev, const int* devlist);
typedef ncclResult_t (*LPNCCLCOMMDESTROY)(ncclComm_t comm);
typedef const char* (*LPNCCLGETERRORSTRING)(ncclResult_t result);
typedef ncclResult_t (*LPNCCLALLREDUCE)(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, hipStream_t stream);
typedef ncclResult_t (*LPNCCLBCAST)(void* buff, int count, ncclDataType_t datatype, int root, ncclComm_t comm, hipStream_t stream);
extern HMODULE g_hModule;
//=============================================================================
// Private Classes
//=============================================================================
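// Data wraps a dynamically loaded nccl64 DLL: Initialize() resolves the NCCL entry points with
// GetProcAddress, and the Nccl* methods forward communicator creation/destruction, AllReduce and
// Bcast through the stored function pointers.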
class Data
{
public:
ncclUniqueId m_id;
ncclComm_t m_comm;
int m_nCount;
int m_nRank;
HINSTANCE m_hDLL;
LPNCCLCOMMINITALL m_pCommInitAll;
LPNCCLCOMMINITRANK m_pCommInitRank;
LPNCCLCOMMDESTROY m_pCommDestroy;
LPNCCLALLREDUCE m_pAllReduce;
LPNCCLBCAST m_pBcast;
LPNCCLGETERRORSTRING m_pGetErrorString;
Data(int nCount, int nRank, char* szId)
{
m_hDLL = NULL;
m_comm = NULL;
m_nCount = nCount;
m_nRank = nRank;
strncpy(m_id.internal, szId, NCCL_UNIQUE_ID_BYTES);
}
LONG Initialize()
{
TCHAR szPath[1024] = { 0 };
TCHAR szNcclPath[1024] = { 0 };
TCHAR* pszVer = NULL;
LONG lErr = GetModuleFileName(g_hModule, szPath, sizeof(szPath));
if (lErr == 0 || lErr == sizeof(szPath))
return ERROR_PARAM_NULL;
int nLen = (int)_tcslen(szPath);
for (int i = nLen - 1; i >= 0; i--)
{
if (szPath[i] == _T('\\') && i < nLen-1)
{
for (int j = i; j < nLen; j++)
{
if (szPath[j] == _T('.'))
{
pszVer = &szPath[j];
break;
}
}
_tcsncpy(szNcclPath, szPath, i + 1);
break;
}
}
_tcscat(szNcclPath, _T("nccl64_134"));
if (pszVer != NULL)
_tcscat(szNcclPath, pszVer);
else
_tcscat(szNcclPath, _T(".dll"));
m_hDLL = LoadLibrary(szNcclPath);
if (m_hDLL == NULL)
return ERROR_CUDA_MISSING_NCCL64DLL;
m_pCommInitAll = (LPNCCLCOMMINITALL)GetProcAddress(m_hDLL, "ncclCommInitAll");
if (m_pCommInitAll == NULL)
return ERROR_PARAM_NULL;
m_pCommInitRank = (LPNCCLCOMMINITRANK)GetProcAddress(m_hDLL, "ncclCommInitRank");
if (m_pCommInitRank == NULL)
return ERROR_PARAM_NULL;
m_pCommDestroy = (LPNCCLCOMMDESTROY)GetProcAddress(m_hDLL, "ncclCommDestroy");
if (m_pCommDestroy == NULL)
return ERROR_PARAM_NULL;
m_pAllReduce = (LPNCCLALLREDUCE)GetProcAddress(m_hDLL, "ncclAllReduce");
if (m_pAllReduce == NULL)
return ERROR_PARAM_NULL;
m_pBcast = (LPNCCLBCAST)GetProcAddress(m_hDLL, "ncclBcast");
if (m_pBcast == NULL)
return ERROR_PARAM_NULL;
m_pGetErrorString = (LPNCCLGETERRORSTRING)GetProcAddress(m_hDLL, "ncclGetErrorString");
if (m_pGetErrorString == NULL)
return ERROR_PARAM_NULL;
return 0;
}
ncclResult_t NcclCommInitRank(ncclComm_t* comm, int ndev, ncclUniqueId commId, int rank)
{
return (*m_pCommInitRank)(comm, ndev, commId, rank);
}
ncclResult_t NcclCommInitAll(ncclComm_t* comm, int ndev, const int* devlist)
{
return (*m_pCommInitAll)(comm, ndev, devlist);
}
ncclResult_t NcclCommDestroy(ncclComm_t comm)
{
return (*m_pCommDestroy)(comm);
}
const char* NcclGetErrorString(ncclResult_t result)
{
return (*m_pGetErrorString)(result);
}
ncclResult_t NcclAllReduce(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, hipStream_t stream)
{
return (*m_pAllReduce)(sendbuff, recvbuff, count, datatype, op, comm, stream);
}
ncclResult_t NcclBcast(void* buff, int count, ncclDataType_t datatype, int root, ncclComm_t comm, hipStream_t stream)
{
return (*m_pBcast)(buff, count, datatype, root, comm, stream);
}
~Data()
{
if (m_comm != NULL)
{
NcclCommDestroy(m_comm);
m_comm = NULL;
}
if (m_hDLL != NULL)
{
FreeLibrary(m_hDLL);
m_hDLL = NULL;
m_pCommDestroy = NULL;
m_pCommInitAll = NULL;
m_pCommInitRank = NULL;
m_pAllReduce = NULL;
m_pBcast = NULL;
m_pGetErrorString = NULL;
}
}
};
//=============================================================================
// Class Methods
//=============================================================================
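// Looks up the GPU through NVML by its PCI bus id and reports whether a display is attached;
// Initialize() below rejects multi-GPU training on a device that drives a display.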
template <class T>
long ncclHandle<T>::isDisplayConnectedToGpu(int nGpuID, bool* pbIsDisplayOn)
{
LONG lErr;
nvmlReturn_t res; // the calls below are NVML calls, so keep the NVML return type
void* device;
*pbIsDisplayOn = false;
char rgPciID[256];
if (lErr = hipDeviceGetPCIBusId(rgPciID, 255, nGpuID))
return lErr;
if ((res = nvmlDeviceGetHandleByPciBusId_v2(rgPciID, (nvmlDevice_t*)&device)) != NVML_SUCCESS)
return (int)res;
nvmlEnableState_t active;
if ((res = nvmlDeviceGetDisplayMode((nvmlDevice_t)device, &active)) == NVML_SUCCESS)
{
if (active == NVML_FEATURE_ENABLED)
*pbIsDisplayOn = true;
}
return 0;
}
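// Converts the element count to bytes, rounds it up to a 64-byte multiple and exports it through
// the NCCL_BUFFSIZE environment variable before the communicator is created.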
template <class T>
void ncclHandle<T>::setBufferSize(long lBufferCount)
{
if (lBufferCount > 0)
{
lBufferCount *= sizeof(T);
lBufferCount /= 64;
lBufferCount = (lBufferCount + 1) * 64;
char szBuffer[256];
snprintf(szBuffer, 255, "NCCL_BUFFSIZE=%ld", lBufferCount);
putenv(szBuffer);
}
}
template <class T>
long ncclHandle<T>::Initialize(Memory<T>* pMem, Math<T>* pMath, int nGpuID, int nCount, int nRank, char* szId)
{
long lErr;
int nDevCount;
m_nGpuID = nGpuID;
Update(pMem, pMath);
if (lErr = hipGetDeviceCount(&nDevCount))
return lErr;
if (nGpuID < 0 || nGpuID >= nDevCount)
return ERROR_PARAM_OUT_OF_RANGE;
if (!m_bNvmlInit)
{
nvmlReturn_t res;
if ((res = nvmlInit_v2()) != NVML_SUCCESS)
return (int)res;
m_bNvmlInit = true;
}
bool bDisplayOn = false;
if (lErr = isDisplayConnectedToGpu(nGpuID, &bDisplayOn))
return lErr;
if (bDisplayOn)
return ERROR_CUDA_NOTSUPPORED_ON_DISPLAYGPU;
m_pData = new Data(nCount, nRank, szId);
if (m_pData == NULL)
return ERROR_MEMORY_OUT;
if (lErr = m_pData->Initialize())
return lErr;
return 0;
}
template long ncclHandle<double>::Initialize(Memory<double>* pMem, Math<double>* pMath, int nGpuID, int nCount, int nRank, char* szId);
template long ncclHandle<float>::Initialize(Memory<float>* pMem, Math<float>* pMath, int nGpuID, int nCount, int nRank, char* szId);
template <class T>
long ncclHandle<T>::Update(Memory<T>* pMem, Math<T>* pMath)
{
m_pMem = pMem;
m_pMath = pMath;
m_pMemCol = pMem->GetMemoryCollection();
m_nRefCount++;
return 0;
}
template long ncclHandle<double>::Update(Memory<double>* pMem, Math<double>* pMath);
template long ncclHandle<float>::Update(Memory<float>* pMem, Math<float>* pMath);
template <class T>
long ncclHandle<T>::CleanUp()
{
m_nRefCount--;
if (m_nRefCount == 0)
{
if (m_pData != NULL)
{
delete m_pData;
m_pData = NULL;
}
}
if (m_bNvmlInit)
{
nvmlShutdown();
m_bNvmlInit = false;
}
return 0;
}
template long ncclHandle<double>::CleanUp();
template long ncclHandle<float>::CleanUp();
template <class T>
LPCSTR ncclHandle<T>::GetErrorString(long lErr)
{
return m_pData->NcclGetErrorString((ncclResult_t)lErr);
}
template LPCSTR ncclHandle<double>::GetErrorString(long lErr);
template LPCSTR ncclHandle<float>::GetErrorString(long lErr);
template <class T>
long ncclHandle<T>::InitSingleProcess(long lBufferCount, int nCount, ncclHandle<T>* rgHandles[])
{
LONG lErr;
ncclComm_t* rgComm = (ncclComm_t*)malloc(sizeof(ncclComm_t) * nCount);
if (rgComm == NULL)
return ERROR_MEMORY_OUT;
int* rgGpu = (int*)malloc(sizeof(int) * nCount);
if (rgGpu == NULL)
{
free(rgComm);
return ERROR_MEMORY_OUT;
}
for (int i = 0; i < nCount; i++)
{
rgGpu[i] = rgHandles[i]->m_nGpuID;
}
setBufferSize(lBufferCount);
lErr = m_pData->NcclCommInitAll(rgComm, nCount, rgGpu);
if (!lErr)
{
for (int i = 0; i < nCount; i++)
{
rgHandles[i]->m_pData->m_comm = rgComm[i];
}
}
free(rgComm);
free(rgGpu);
return lErr;
}
template long ncclHandle<double>::InitSingleProcess(long lBufferCount, int nCount, ncclHandle<double>* rgHandles[]);
template long ncclHandle<float>::InitSingleProcess(long lBufferCount, int nCount, ncclHandle<float>* rgHandles[]);
template <class T>
long ncclHandle<T>::InitMultiProcess(long lBufferCount)
{
setBufferSize(lBufferCount);
return m_pData->NcclCommInitRank(&m_pData->m_comm, m_pData->m_nCount, m_pData->m_id, m_pData->m_nRank);
}
template long ncclHandle<double>::InitMultiProcess(long lBufferCount);
template long ncclHandle<float>::InitMultiProcess(long lBufferCount);
template <class T>
long ncclHandle<T>::Broadcast(long hStream, long hX, int nCount)
{
ncclDataType_t type = (sizeof(T) == sizeof(double)) ? ncclDouble : ncclFloat;
MemoryItem* pX;
LONG lErr;
if (lErr = m_pMemCol->GetData(hX, &pX))
return lErr;
T* x = (T*)pX->Data();
if (lErr = hipSetDevice(m_nGpuID))
return lErr;
hipStream_t stream = hipStreamDefault;
if (hStream != 0)
stream = m_pMem->GetStream(hStream);
if (lErr = m_pData->NcclBcast(x, nCount, type, 0, m_pData->m_comm, stream))
return lErr;
return 0;
}
template long ncclHandle<double>::Broadcast(long hStream, long hData, int nCount);
template long ncclHandle<float>::Broadcast(long hStream, long hData, int nCount);
template <class T>
long ncclHandle<T>::AllReduce(long hStream, long hX, int nCount, NCCL_OP op, T fScale)
{
long lErr;
ncclRedOp_t ncclop = ncclSum;
if (op == NCCL_PROD)
ncclop = ncclProd;
else if (op == NCCL_MIN)
ncclop = ncclMin;
else if (op == NCCL_MAX)
ncclop = ncclMax;
MemoryItem* pX;
if (lErr = m_pMemCol->GetData(hX, &pX))
return lErr;
T* x = (T*)pX->Data();
if (lErr = hipSetDevice(m_nGpuID))
return lErr;
hipStream_t stream = hipStreamDefault;
if (hStream != 0)
stream = m_pMem->GetStream(hStream);
ncclDataType_t type = (sizeof(T) == sizeof(double)) ? ncclDouble : ncclFloat;
if (lErr = m_pData->NcclAllReduce(x, x, nCount, type, ncclop, m_pData->m_comm, stream))
return lErr;
if (fScale != T(1.0))
return m_pMath->scal(nCount, fScale, hX, 0, hStream);
return 0;
}
template long ncclHandle<double>::AllReduce(long hStream, long hData, int nCount, NCCL_OP op, double dfScale);
template long ncclHandle<float>::AllReduce(long hStream, long hData, int nCount, NCCL_OP op, float fScale);
// end | 218fe79382e65d2708a24c32ae02dad0a9acd52c.cu | //=============================================================================
// FILE: nccl.cu
//
// DESC: This file implements the multi-gpu communication functionality
//
// NOTES: Uses the 'Nickel' NCCL library located at: https://github.com/NVIDIA/nccl
//=============================================================================
#include "util.h"
#include "nccl.h"
#include "memory.h"
#include "..\_nccl\nccl.h"
#include <nvapi.h>
#include <nvml.h>
//=============================================================================
// Function Definitions
//=============================================================================
typedef ncclResult_t (*LPNCCLCOMMINITRANK)(ncclComm_t* comm, int ndev, ncclUniqueId commId, int rank);
typedef ncclResult_t (*LPNCCLCOMMINITALL)(ncclComm_t* comm, int ndev, const int* devlist);
typedef ncclResult_t (*LPNCCLCOMMDESTROY)(ncclComm_t comm);
typedef const char* (*LPNCCLGETERRORSTRING)(ncclResult_t result);
typedef ncclResult_t (*LPNCCLALLREDUCE)(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, cudaStream_t stream);
typedef ncclResult_t (*LPNCCLBCAST)(void* buff, int count, ncclDataType_t datatype, int root, ncclComm_t comm, cudaStream_t stream);
extern HMODULE g_hModule;
//=============================================================================
// Private Classes
//=============================================================================
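// Data wraps a dynamically loaded nccl64 DLL: Initialize() resolves the NCCL entry points with
// GetProcAddress, and the Nccl* methods forward communicator creation/destruction, AllReduce and
// Bcast through the stored function pointers.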
class Data
{
public:
ncclUniqueId m_id;
ncclComm_t m_comm;
int m_nCount;
int m_nRank;
HINSTANCE m_hDLL;
LPNCCLCOMMINITALL m_pCommInitAll;
LPNCCLCOMMINITRANK m_pCommInitRank;
LPNCCLCOMMDESTROY m_pCommDestroy;
LPNCCLALLREDUCE m_pAllReduce;
LPNCCLBCAST m_pBcast;
LPNCCLGETERRORSTRING m_pGetErrorString;
Data(int nCount, int nRank, char* szId)
{
m_hDLL = NULL;
m_comm = NULL;
m_nCount = nCount;
m_nRank = nRank;
strncpy(m_id.internal, szId, NCCL_UNIQUE_ID_BYTES);
}
LONG Initialize()
{
TCHAR szPath[1024] = { 0 };
TCHAR szNcclPath[1024] = { 0 };
TCHAR* pszVer = NULL;
LONG lErr = GetModuleFileName(g_hModule, szPath, sizeof(szPath));
if (lErr == 0 || lErr == sizeof(szPath))
return ERROR_PARAM_NULL;
int nLen = (int)_tcslen(szPath);
for (int i = nLen - 1; i >= 0; i--)
{
if (szPath[i] == _T('\\') && i < nLen-1)
{
for (int j = i; j < nLen; j++)
{
if (szPath[j] == _T('.'))
{
pszVer = &szPath[j];
break;
}
}
_tcsncpy(szNcclPath, szPath, i + 1);
break;
}
}
_tcscat(szNcclPath, _T("nccl64_134"));
if (pszVer != NULL)
_tcscat(szNcclPath, pszVer);
else
_tcscat(szNcclPath, _T(".dll"));
m_hDLL = LoadLibrary(szNcclPath);
if (m_hDLL == NULL)
return ERROR_CUDA_MISSING_NCCL64DLL;
m_pCommInitAll = (LPNCCLCOMMINITALL)GetProcAddress(m_hDLL, "ncclCommInitAll");
if (m_pCommInitAll == NULL)
return ERROR_PARAM_NULL;
m_pCommInitRank = (LPNCCLCOMMINITRANK)GetProcAddress(m_hDLL, "ncclCommInitRank");
if (m_pCommInitRank == NULL)
return ERROR_PARAM_NULL;
m_pCommDestroy = (LPNCCLCOMMDESTROY)GetProcAddress(m_hDLL, "ncclCommDestroy");
if (m_pCommDestroy == NULL)
return ERROR_PARAM_NULL;
m_pAllReduce = (LPNCCLALLREDUCE)GetProcAddress(m_hDLL, "ncclAllReduce");
if (m_pAllReduce == NULL)
return ERROR_PARAM_NULL;
m_pBcast = (LPNCCLBCAST)GetProcAddress(m_hDLL, "ncclBcast");
if (m_pBcast == NULL)
return ERROR_PARAM_NULL;
m_pGetErrorString = (LPNCCLGETERRORSTRING)GetProcAddress(m_hDLL, "ncclGetErrorString");
if (m_pGetErrorString == NULL)
return ERROR_PARAM_NULL;
return 0;
}
ncclResult_t NcclCommInitRank(ncclComm_t* comm, int ndev, ncclUniqueId commId, int rank)
{
return (*m_pCommInitRank)(comm, ndev, commId, rank);
}
ncclResult_t NcclCommInitAll(ncclComm_t* comm, int ndev, const int* devlist)
{
return (*m_pCommInitAll)(comm, ndev, devlist);
}
ncclResult_t NcclCommDestroy(ncclComm_t comm)
{
return (*m_pCommDestroy)(comm);
}
const char* NcclGetErrorString(ncclResult_t result)
{
return (*m_pGetErrorString)(result);
}
ncclResult_t NcclAllReduce(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, cudaStream_t stream)
{
return (*m_pAllReduce)(sendbuff, recvbuff, count, datatype, op, comm, stream);
}
ncclResult_t NcclBcast(void* buff, int count, ncclDataType_t datatype, int root, ncclComm_t comm, cudaStream_t stream)
{
return (*m_pBcast)(buff, count, datatype, root, comm, stream);
}
~Data()
{
if (m_comm != NULL)
{
NcclCommDestroy(m_comm);
m_comm = NULL;
}
if (m_hDLL != NULL)
{
FreeLibrary(m_hDLL);
m_hDLL = NULL;
m_pCommDestroy = NULL;
m_pCommInitAll = NULL;
m_pCommInitRank = NULL;
m_pAllReduce = NULL;
m_pBcast = NULL;
m_pGetErrorString = NULL;
}
}
};
//=============================================================================
// Class Methods
//=============================================================================
template <class T>
long ncclHandle<T>::isDisplayConnectedToGpu(int nGpuID, bool* pbIsDisplayOn)
{
LONG lErr;
nvmlReturn_t res;
void* device;
*pbIsDisplayOn = false;
char rgPciID[256];
if (lErr = cudaDeviceGetPCIBusId(rgPciID, 255, nGpuID))
return lErr;
if ((res = nvmlDeviceGetHandleByPciBusId_v2(rgPciID, (nvmlDevice_t*)&device)) != NVML_SUCCESS)
return (int)res;
nvmlEnableState_t active;
if ((res = nvmlDeviceGetDisplayMode((nvmlDevice_t)device, &active)) == NVML_SUCCESS)
{
if (active == NVML_FEATURE_ENABLED)
*pbIsDisplayOn = true;
}
return 0;
}
template <class T>
void ncclHandle<T>::setBufferSize(long lBufferCount)
{
if (lBufferCount > 0)
{
lBufferCount *= sizeof(T);
lBufferCount /= 64;
lBufferCount = (lBufferCount + 1) * 64;
char szBuffer[256];
snprintf(szBuffer, 255, "NCCL_BUFFSIZE=%ld", lBufferCount);
putenv(szBuffer);
}
}
template <class T>
long ncclHandle<T>::Initialize(Memory<T>* pMem, Math<T>* pMath, int nGpuID, int nCount, int nRank, char* szId)
{
long lErr;
int nDevCount;
m_nGpuID = nGpuID;
Update(pMem, pMath);
if (lErr = cudaGetDeviceCount(&nDevCount))
return lErr;
if (nGpuID < 0 || nGpuID >= nDevCount)
return ERROR_PARAM_OUT_OF_RANGE;
if (!m_bNvmlInit)
{
nvmlReturn_t res;
if ((res = nvmlInit_v2()) != NVML_SUCCESS)
return (int)res;
m_bNvmlInit = true;
}
bool bDisplayOn = false;
if (lErr = isDisplayConnectedToGpu(nGpuID, &bDisplayOn))
return lErr;
if (bDisplayOn)
return ERROR_CUDA_NOTSUPPORED_ON_DISPLAYGPU;
m_pData = new Data(nCount, nRank, szId);
if (m_pData == NULL)
return ERROR_MEMORY_OUT;
if (lErr = m_pData->Initialize())
return lErr;
return 0;
}
template long ncclHandle<double>::Initialize(Memory<double>* pMem, Math<double>* pMath, int nGpuID, int nCount, int nRank, char* szId);
template long ncclHandle<float>::Initialize(Memory<float>* pMem, Math<float>* pMath, int nGpuID, int nCount, int nRank, char* szId);
template <class T>
long ncclHandle<T>::Update(Memory<T>* pMem, Math<T>* pMath)
{
m_pMem = pMem;
m_pMath = pMath;
m_pMemCol = pMem->GetMemoryCollection();
m_nRefCount++;
return 0;
}
template long ncclHandle<double>::Update(Memory<double>* pMem, Math<double>* pMath);
template long ncclHandle<float>::Update(Memory<float>* pMem, Math<float>* pMath);
template <class T>
long ncclHandle<T>::CleanUp()
{
m_nRefCount--;
if (m_nRefCount == 0)
{
if (m_pData != NULL)
{
delete m_pData;
m_pData = NULL;
}
}
if (m_bNvmlInit)
{
nvmlShutdown();
m_bNvmlInit = false;
}
return 0;
}
template long ncclHandle<double>::CleanUp();
template long ncclHandle<float>::CleanUp();
template <class T>
LPCSTR ncclHandle<T>::GetErrorString(long lErr)
{
return m_pData->NcclGetErrorString((ncclResult_t)lErr);
}
template LPCSTR ncclHandle<double>::GetErrorString(long lErr);
template LPCSTR ncclHandle<float>::GetErrorString(long lErr);
template <class T>
long ncclHandle<T>::InitSingleProcess(long lBufferCount, int nCount, ncclHandle<T>* rgHandles[])
{
LONG lErr;
ncclComm_t* rgComm = (ncclComm_t*)malloc(sizeof(ncclComm_t) * nCount);
if (rgComm == NULL)
return ERROR_MEMORY_OUT;
int* rgGpu = (int*)malloc(sizeof(int) * nCount);
if (rgGpu == NULL)
{
free(rgComm);
return ERROR_MEMORY_OUT;
}
for (int i = 0; i < nCount; i++)
{
rgGpu[i] = rgHandles[i]->m_nGpuID;
}
setBufferSize(lBufferCount);
lErr = m_pData->NcclCommInitAll(rgComm, nCount, rgGpu);
if (!lErr)
{
for (int i = 0; i < nCount; i++)
{
rgHandles[i]->m_pData->m_comm = rgComm[i];
}
}
free(rgComm);
free(rgGpu);
return lErr;
}
template long ncclHandle<double>::InitSingleProcess(long lBufferCount, int nCount, ncclHandle<double>* rgHandles[]);
template long ncclHandle<float>::InitSingleProcess(long lBufferCount, int nCount, ncclHandle<float>* rgHandles[]);
template <class T>
long ncclHandle<T>::InitMultiProcess(long lBufferCount)
{
setBufferSize(lBufferCount);
return m_pData->NcclCommInitRank(&m_pData->m_comm, m_pData->m_nCount, m_pData->m_id, m_pData->m_nRank);
}
template long ncclHandle<double>::InitMultiProcess(long lBufferCount);
template long ncclHandle<float>::InitMultiProcess(long lBufferCount);
template <class T>
long ncclHandle<T>::Broadcast(long hStream, long hX, int nCount)
{
ncclDataType_t type = (sizeof(T) == sizeof(double)) ? ncclDouble : ncclFloat;
MemoryItem* pX;
LONG lErr;
if (lErr = m_pMemCol->GetData(hX, &pX))
return lErr;
T* x = (T*)pX->Data();
if (lErr = cudaSetDevice(m_nGpuID))
return lErr;
cudaStream_t stream = cudaStreamDefault;
if (hStream != 0)
stream = m_pMem->GetStream(hStream);
if (lErr = m_pData->NcclBcast(x, nCount, type, 0, m_pData->m_comm, stream))
return lErr;
return 0;
}
template long ncclHandle<double>::Broadcast(long hStream, long hData, int nCount);
template long ncclHandle<float>::Broadcast(long hStream, long hData, int nCount);
template <class T>
long ncclHandle<T>::AllReduce(long hStream, long hX, int nCount, NCCL_OP op, T fScale)
{
long lErr;
ncclRedOp_t ncclop = ncclSum;
if (op == NCCL_PROD)
ncclop = ncclProd;
else if (op == NCCL_MIN)
ncclop = ncclMin;
else if (op == NCCL_MAX)
ncclop = ncclMax;
MemoryItem* pX;
if (lErr = m_pMemCol->GetData(hX, &pX))
return lErr;
T* x = (T*)pX->Data();
if (lErr = cudaSetDevice(m_nGpuID))
return lErr;
cudaStream_t stream = cudaStreamDefault;
if (hStream != 0)
stream = m_pMem->GetStream(hStream);
ncclDataType_t type = (sizeof(T) == sizeof(double)) ? ncclDouble : ncclFloat;
if (lErr = m_pData->NcclAllReduce(x, x, nCount, type, ncclop, m_pData->m_comm, stream))
return lErr;
if (fScale != T(1.0))
return m_pMath->scal(nCount, fScale, hX, 0, hStream);
return 0;
}
template long ncclHandle<double>::AllReduce(long hStream, long hData, int nCount, NCCL_OP op, double dfScale);
template long ncclHandle<float>::AllReduce(long hStream, long hData, int nCount, NCCL_OP op, float fScale);
// end |
a01454fc2730174b86fbfd14142c3a895e27e7f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* one-block-one-bit counting sort
*/
#include "onebitsort.cuh"
extern __shared__ uint sRadixSum[];
/*
* shared memory layout
* 0 -> 1 group offset
* 2 -> 3 group count
* 4 -> 4+n*2-1 per-thread count
* 4+n*2 -> 4+n*2+n*2-1 per-thread offset
*
* per-thread count layout
* thread count: n
* bin count : 2
*
* 0 1 2 n-1 thread
*
* 2*0 2*1 2*2 2*(n-1)
* 2*1-1 2*2-1 2*3-1 2*n-1
*
* prefix sum: http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
*/
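// Single-block kernel. Pass 1 counts the 0/1 keys per thread, reduces them in shared memory and
// publishes the two group counts and offsets (also mirrored to `counts`). Pass 2 walks the data again
// in 256-element batches, prefix-sums the per-thread bins and scatters every pair to its sorted slot.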
__global__ void RadixSum(KeyValuePair *oData, KeyValuePair *pData,
uint elements,
uint * counts)
{
uint * binVertical = &sRadixSum[4 + threadIdx.x * 2];
uint * binHorizontal = &sRadixSum[4 + threadIdx.x];
uint * binOffsetVertical = &sRadixSum[4 + 256 * 2 + threadIdx.x * 2];
uint * binOffsetHorizontal = &sRadixSum[4 + 256 * 2 + threadIdx.x];
uint i = 0;
for(;i<2;i++)
binVertical[i] = 0;
uint numBatches = elements>>8;
if(elements & 255) numBatches++;
for(i=0;i<numBatches;i++) {
uint pos = i*256+threadIdx.x;
if(pos<elements) {
uint p = pData[pos].key;
binVertical[p]++;
}
}
__syncthreads();
onebitsort::reduceInBlock(binVertical);
if(threadIdx.x < 4)
sRadixSum[threadIdx.x] = 0;
__syncthreads();
if(threadIdx.x < 2) {
sRadixSum[2 + threadIdx.x] = binHorizontal[0];
counts[2 + threadIdx.x] = sRadixSum[2 + threadIdx.x];
}
__syncthreads();
if(threadIdx.x == 1) {
sRadixSum[threadIdx.x] += sRadixSum[2 + threadIdx.x - 1];
}
if(threadIdx.x < 2) {
counts[threadIdx.x] = sRadixSum[threadIdx.x];
}
__syncthreads();
uint pos, p, ind;
for(i=0;i<numBatches;i++) {
binVertical[0] = 0;
binVertical[1] = 0;
__syncthreads();
pos = i*256+threadIdx.x;
if(pos<elements) {
p = pData[pos].key;
binVertical[p]++;
}
__syncthreads();
onebitsort::scanInBlock(&sRadixSum[4 + 256 * 2], &sRadixSum[4]);
if(pos<elements) {
ind = sRadixSum[p] + binOffsetVertical[p];
oData[ind] = pData[pos];
}
__syncthreads();
if(threadIdx.x < 2) {
sRadixSum[threadIdx.x] += binOffsetHorizontal[2*255]
+ binHorizontal[2*255];
}
__syncthreads();
}
}
#include <stdio.h>
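// Host wrapper: launches one 256-thread block with enough dynamic shared memory for the per-thread
// bins, then copies the four group offset/count words back to the host for the debug printout.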
void OneBitSort(KeyValuePair *pData0, KeyValuePair *pData1, uint elements, uint * counts)
{
hipLaunchKernelGGL(( RadixSum), dim3(1), dim3(256), 16320, 0, pData1, pData0, elements, counts);
uint hbins[4] = {0,0,0,0};
hipError_t err = hipMemcpy(hbins, counts, 16, hipMemcpyDeviceToHost);
if (err != hipSuccess)
printf(" error group counts %s\n", hipGetErrorString(err));
printf(" offset0 %i \n", hbins[0]);
printf(" offset1 %i \n", hbins[1]);
printf(" count0 %i \n", hbins[2]);
printf(" count1 %i \n", hbins[3]);
} | a01454fc2730174b86fbfd14142c3a895e27e7f8.cu | /*
* one-block-one-bit counting sort
*/
#include "onebitsort.cuh"
extern __shared__ uint sRadixSum[];
/*
* shared memory layout
* 0 -> 1 group offset
* 2 -> 3 group count
* 4 -> 4+n*2-1 per-thread count
* 4+n*2 -> 4+n*2+n*2-1 per-thread offset
*
* per-thread count layout
* thread count: n
* bin count : 2
*
* 0 1 2 n-1 thread
*
* 2*0 2*1 2*2 2*(n-1)
* 2*1-1 2*2-1 2*3-1 2*n-1
*
* prefix sum: http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
*/
__global__ void RadixSum(KeyValuePair *oData, KeyValuePair *pData,
uint elements,
uint * counts)
{
uint * binVertical = &sRadixSum[4 + threadIdx.x * 2];
uint * binHorizontal = &sRadixSum[4 + threadIdx.x];
uint * binOffsetVertical = &sRadixSum[4 + 256 * 2 + threadIdx.x * 2];
uint * binOffsetHorizontal = &sRadixSum[4 + 256 * 2 + threadIdx.x];
uint i = 0;
for(;i<2;i++)
binVertical[i] = 0;
uint numBatches = elements>>8;
if(elements & 255) numBatches++;
for(i=0;i<numBatches;i++) {
uint pos = i*256+threadIdx.x;
if(pos<elements) {
uint p = pData[pos].key;
binVertical[p]++;
}
}
__syncthreads();
onebitsort::reduceInBlock(binVertical);
if(threadIdx.x < 4)
sRadixSum[threadIdx.x] = 0;
__syncthreads();
if(threadIdx.x < 2) {
sRadixSum[2 + threadIdx.x] = binHorizontal[0];
counts[2 + threadIdx.x] = sRadixSum[2 + threadIdx.x];
}
__syncthreads();
if(threadIdx.x == 1) {
sRadixSum[threadIdx.x] += sRadixSum[2 + threadIdx.x - 1];
}
if(threadIdx.x < 2) {
counts[threadIdx.x] = sRadixSum[threadIdx.x];
}
__syncthreads();
uint pos, p, ind;
for(i=0;i<numBatches;i++) {
binVertical[0] = 0;
binVertical[1] = 0;
__syncthreads();
pos = i*256+threadIdx.x;
if(pos<elements) {
p = pData[pos].key;
binVertical[p]++;
}
__syncthreads();
onebitsort::scanInBlock(&sRadixSum[4 + 256 * 2], &sRadixSum[4]);
if(pos<elements) {
ind = sRadixSum[p] + binOffsetVertical[p];
oData[ind] = pData[pos];
}
__syncthreads();
if(threadIdx.x < 2) {
sRadixSum[threadIdx.x] += binOffsetHorizontal[2*255]
+ binHorizontal[2*255];
}
__syncthreads();
}
}
#include <stdio.h>
void OneBitSort(KeyValuePair *pData0, KeyValuePair *pData1, uint elements, uint * counts)
{
RadixSum<<<1, 256, 16320>>>(pData1, pData0, elements, counts);
uint hbins[4] = {0,0,0,0};
cudaError_t err = cudaMemcpy(hbins, counts, 16, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
printf(" error group counts %s\n", cudaGetErrorString(err));
printf(" offset0 %i \n", hbins[0]);
printf(" offset1 %i \n", hbins[1]);
printf(" count0 %i \n", hbins[2]);
printf(" count1 %i \n", hbins[3]);
} |
6fd13c4f7e828cef82a43b4b5d5456cfa5c34b0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 32
__global__ void square( unsigned *h_matrix ){
unsigned id = threadIdx.x * blockDim.y + threadIdx.y;
h_matrix[id] = id * id;
}
int main() {
dim3 block(N, N);
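// a single 32x32 block: one thread per element of the N x N matrix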
//creating the matrix variables
unsigned *matrix, *h_matrix;
//initializing the variables
matrix = (unsigned *)malloc(N * N * sizeof(unsigned));
hipMalloc(&h_matrix, N * N * sizeof(unsigned));
//squaring the matrix
hipLaunchKernelGGL(( square), dim3(1), dim3(block), 0, 0, h_matrix);
//copying the memory over
hipMemcpy( matrix, h_matrix, N * N * sizeof(unsigned), hipMemcpyDeviceToHost );
//print out the matrix
for( int i = 0; i < N; i++ ){
for( int j = 0; j < N; j++ ){
printf( "%d\t", matrix[ i * N + j ] );
}
printf( "\n" );
}
return 0;
}
| 6fd13c4f7e828cef82a43b4b5d5456cfa5c34b0b.cu |
#include <stdio.h>
#include <cuda.h>
#define N 32
__global__ void square( unsigned *h_matrix ){
unsigned id = threadIdx.x * blockDim.y + threadIdx.y;
h_matrix[id] = id * id;
}
int main() {
dim3 block(N, N);
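// a single 32x32 block: one thread per element of the N x N matrix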
//creating the matrix variables
unsigned *matrix, *h_matrix;
//initializing the variables
matrix = (unsigned *)malloc(N * N * sizeof(unsigned));
cudaMalloc(&h_matrix, N * N * sizeof(unsigned));
//squaring the matrix
square<<<1, block>>>(h_matrix);
//copying the memory over
cudaMemcpy( matrix, h_matrix, N * N * sizeof(unsigned), cudaMemcpyDeviceToHost );
//print out the matrix
for( int i = 0; i < N; i++ ){
for( int j = 0; j < N; j++ ){
printf( "%d\t", matrix[ i * N + j ] );
}
printf( "\n" );
}
return 0;
}
|
4f3d031fbd054f50cffd6ee0dcc79c2aa8dd9934.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or bpied warranties, including, but not limited to, the bpied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device {
namespace bgfg_gmg
{
__constant__ int c_width;
__constant__ int c_height;
__constant__ float c_minVal;
__constant__ float c_maxVal;
__constant__ int c_quantizationLevels;
__constant__ float c_backgroundPrior;
__constant__ float c_decisionThreshold;
__constant__ int c_maxFeatures;
__constant__ int c_numInitializationFrames;
void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior,
float decisionThreshold, int maxFeatures, int numInitializationFrames)
{
cudaSafeCall( hipMemcpyToSymbol(c_width, &width, sizeof(width)) );
cudaSafeCall( hipMemcpyToSymbol(c_height, &height, sizeof(height)) );
cudaSafeCall( hipMemcpyToSymbol(c_minVal, &minVal, sizeof(minVal)) );
cudaSafeCall( hipMemcpyToSymbol(c_maxVal, &maxVal, sizeof(maxVal)) );
cudaSafeCall( hipMemcpyToSymbol(c_quantizationLevels, &quantizationLevels, sizeof(quantizationLevels)) );
cudaSafeCall( hipMemcpyToSymbol(c_backgroundPrior, &backgroundPrior, sizeof(backgroundPrior)) );
cudaSafeCall( hipMemcpyToSymbol(c_decisionThreshold, &decisionThreshold, sizeof(decisionThreshold)) );
cudaSafeCall( hipMemcpyToSymbol(c_maxFeatures, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( hipMemcpyToSymbol(c_numInitializationFrames, &numInitializationFrames, sizeof(numInitializationFrames)) );
}
__device__ float findFeature(const int color, const PtrStepi& colors, const PtrStepf& weights, const int x, const int y, const int nfeatures)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
if (color == colors(fy, x))
return weights(fy, x);
}
// not in histogram, so return 0.
return 0.0f;
}
__device__ void normalizeHistogram(PtrStepf weights, const int x, const int y, const int nfeatures)
{
float total = 0.0f;
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
total += weights(fy, x);
if (total != 0.0f)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
weights(fy, x) /= total;
}
}
__device__ bool insertFeature(const int color, const float weight, PtrStepi colors, PtrStepf weights, const int x, const int y, int& nfeatures)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
if (color == colors(fy, x))
{
// feature in histogram
weights(fy, x) += weight;
return false;
}
}
if (nfeatures == c_maxFeatures)
{
// discard oldest feature
int idx = -1;
float minVal = numeric_limits<float>::max();
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
const float w = weights(fy, x);
if (w < minVal)
{
minVal = w;
idx = fy;
}
}
colors(idx, x) = color;
weights(idx, x) = weight;
return false;
}
colors(nfeatures * c_height + y, x) = color;
weights(nfeatures * c_height + y, x) = weight;
++nfeatures;
return true;
}
namespace detail
{
template <int cn> struct Quantization
{
template <typename T>
__device__ static int apply(const T& val)
{
int res = 0;
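// quantize each channel into [0, c_quantizationLevels) and pack one channel per byte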
res |= static_cast<int>((val.x - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal));
res |= static_cast<int>((val.y - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 8;
res |= static_cast<int>((val.z - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 16;
return res;
}
};
template <> struct Quantization<1>
{
template <typename T>
__device__ static int apply(T val)
{
return static_cast<int>((val - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal));
}
};
}
template <typename T> struct Quantization : detail::Quantization<VecTraits<T>::cn> {};
template <typename SrcT>
__global__ void update(const PtrStep<SrcT> frame, PtrStepb fgmask, PtrStepi colors_, PtrStepf weights_, PtrStepi nfeatures_,
const int frameNum, const float learningRate, const bool updateBackgroundModel)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= c_width || y >= c_height)
return;
const SrcT pix = frame(y, x);
const int newFeatureColor = Quantization<SrcT>::apply(pix);
int nfeatures = nfeatures_(y, x);
if (frameNum >= c_numInitializationFrames)
{
// typical operation
const float weight = findFeature(newFeatureColor, colors_, weights_, x, y, nfeatures);
// see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule
const float posterior = (weight * c_backgroundPrior) / (weight * c_backgroundPrior + (1.0f - weight) * (1.0f - c_backgroundPrior));
const bool isForeground = ((1.0f - posterior) > c_decisionThreshold);
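// cast of -isForeground: foreground pixels become 255, background stays 0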
fgmask(y, x) = (uchar)(-isForeground);
// update histogram.
if (updateBackgroundModel)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
weights_(fy, x) *= 1.0f - learningRate;
bool inserted = insertFeature(newFeatureColor, learningRate, colors_, weights_, x, y, nfeatures);
if (inserted)
{
normalizeHistogram(weights_, x, y, nfeatures);
nfeatures_(y, x) = nfeatures;
}
}
}
else if (updateBackgroundModel)
{
// training-mode update
insertFeature(newFeatureColor, 1.0f, colors_, weights_, x, y, nfeatures);
if (frameNum == c_numInitializationFrames - 1)
normalizeHistogram(weights_, x, y, nfeatures);
}
}
template <typename SrcT>
void update_gpu(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(update<SrcT>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( update<SrcT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<SrcT>) frame, fgmask, colors, weights, nfeatures, frameNum, learningRate, updateBackgroundModel);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void update_gpu<uchar >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<uchar3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<uchar4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<ushort >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<ushort3>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<ushort4>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<float >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<float3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
template void update_gpu<float4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream);
}
}}}
| 4f3d031fbd054f50cffd6ee0dcc79c2aa8dd9934.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or bpied warranties, including, but not limited to, the bpied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device {
namespace bgfg_gmg
{
__constant__ int c_width;
__constant__ int c_height;
__constant__ float c_minVal;
__constant__ float c_maxVal;
__constant__ int c_quantizationLevels;
__constant__ float c_backgroundPrior;
__constant__ float c_decisionThreshold;
__constant__ int c_maxFeatures;
__constant__ int c_numInitializationFrames;
void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior,
float decisionThreshold, int maxFeatures, int numInitializationFrames)
{
cudaSafeCall( cudaMemcpyToSymbol(c_width, &width, sizeof(width)) );
cudaSafeCall( cudaMemcpyToSymbol(c_height, &height, sizeof(height)) );
cudaSafeCall( cudaMemcpyToSymbol(c_minVal, &minVal, sizeof(minVal)) );
cudaSafeCall( cudaMemcpyToSymbol(c_maxVal, &maxVal, sizeof(maxVal)) );
cudaSafeCall( cudaMemcpyToSymbol(c_quantizationLevels, &quantizationLevels, sizeof(quantizationLevels)) );
cudaSafeCall( cudaMemcpyToSymbol(c_backgroundPrior, &backgroundPrior, sizeof(backgroundPrior)) );
cudaSafeCall( cudaMemcpyToSymbol(c_decisionThreshold, &decisionThreshold, sizeof(decisionThreshold)) );
cudaSafeCall( cudaMemcpyToSymbol(c_maxFeatures, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( cudaMemcpyToSymbol(c_numInitializationFrames, &numInitializationFrames, sizeof(numInitializationFrames)) );
}
__device__ float findFeature(const int color, const PtrStepi& colors, const PtrStepf& weights, const int x, const int y, const int nfeatures)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
if (color == colors(fy, x))
return weights(fy, x);
}
// not in histogram, so return 0.
return 0.0f;
}
__device__ void normalizeHistogram(PtrStepf weights, const int x, const int y, const int nfeatures)
{
float total = 0.0f;
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
total += weights(fy, x);
if (total != 0.0f)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
weights(fy, x) /= total;
}
}
__device__ bool insertFeature(const int color, const float weight, PtrStepi colors, PtrStepf weights, const int x, const int y, int& nfeatures)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
if (color == colors(fy, x))
{
// feature in histogram
weights(fy, x) += weight;
return false;
}
}
if (nfeatures == c_maxFeatures)
{
// discard oldest feature
int idx = -1;
float minVal = numeric_limits<float>::max();
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
{
const float w = weights(fy, x);
if (w < minVal)
{
minVal = w;
idx = fy;
}
}
colors(idx, x) = color;
weights(idx, x) = weight;
return false;
}
colors(nfeatures * c_height + y, x) = color;
weights(nfeatures * c_height + y, x) = weight;
++nfeatures;
return true;
}
namespace detail
{
template <int cn> struct Quantization
{
template <typename T>
__device__ static int apply(const T& val)
{
int res = 0;
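// quantize each channel into [0, c_quantizationLevels) and pack one channel per byte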
res |= static_cast<int>((val.x - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal));
res |= static_cast<int>((val.y - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 8;
res |= static_cast<int>((val.z - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 16;
return res;
}
};
template <> struct Quantization<1>
{
template <typename T>
__device__ static int apply(T val)
{
return static_cast<int>((val - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal));
}
};
}
template <typename T> struct Quantization : detail::Quantization<VecTraits<T>::cn> {};
template <typename SrcT>
__global__ void update(const PtrStep<SrcT> frame, PtrStepb fgmask, PtrStepi colors_, PtrStepf weights_, PtrStepi nfeatures_,
const int frameNum, const float learningRate, const bool updateBackgroundModel)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= c_width || y >= c_height)
return;
const SrcT pix = frame(y, x);
const int newFeatureColor = Quantization<SrcT>::apply(pix);
int nfeatures = nfeatures_(y, x);
if (frameNum >= c_numInitializationFrames)
{
// typical operation
const float weight = findFeature(newFeatureColor, colors_, weights_, x, y, nfeatures);
// see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule
const float posterior = (weight * c_backgroundPrior) / (weight * c_backgroundPrior + (1.0f - weight) * (1.0f - c_backgroundPrior));
const bool isForeground = ((1.0f - posterior) > c_decisionThreshold);
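// cast of -isForeground: foreground pixels become 255, background stays 0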
fgmask(y, x) = (uchar)(-isForeground);
// update histogram.
if (updateBackgroundModel)
{
for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height)
weights_(fy, x) *= 1.0f - learningRate;
bool inserted = insertFeature(newFeatureColor, learningRate, colors_, weights_, x, y, nfeatures);
if (inserted)
{
normalizeHistogram(weights_, x, y, nfeatures);
nfeatures_(y, x) = nfeatures;
}
}
}
else if (updateBackgroundModel)
{
// training-mode update
insertFeature(newFeatureColor, 1.0f, colors_, weights_, x, y, nfeatures);
if (frameNum == c_numInitializationFrames - 1)
normalizeHistogram(weights_, x, y, nfeatures);
}
}
template <typename SrcT>
void update_gpu(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(update<SrcT>, cudaFuncCachePreferL1) );
update<SrcT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, colors, weights, nfeatures, frameNum, learningRate, updateBackgroundModel);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void update_gpu<uchar >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<uchar3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<uchar4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<ushort >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<ushort3>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<ushort4>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<float >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<float3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
template void update_gpu<float4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
}
}}}
|
554c236a9ed2986e7f055a79cb4938a4abe79487.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <stdio.h>
#include <iostream>
//#include <time.h>
//#include <string.h>
//#include <windows.h>
//#include <wincrypt.h> /* CryptAcquireContext, CryptGenRandom */
#include <hip/hip_runtime.h>
//#include <hip/hip_runtime_api.h>
//#include <hiprand/hiprand_kernel.h>
#include <hip/device_functions.h>
#include "device_launch_parameters.h"
#define uint8 unsigned char
#define CONST_WORD_LIMIT 10
#define CONST_CHARSET_LIMIT 100
#define CONST_CHARSET "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
#define CONST_CHARSET_LENGTH (sizeof(CONST_CHARSET) - 1)
#define CONST_WORD_LENGTH_MIN 1
#define CONST_WORD_LENGTH_MAX 8
//#define BCRYPT_HASHSIZE 60
//#define RANDBYTES (16)
#include "assert.cu"
#include "md5.cu"
#include "sha1.cu"
#include "sha256.cu"
#include "keccak.cu"
#include "sha1new.cu"
#include "md5new.cu"
#include "keccakv2.cu"
/* Global variables */
uint8_t g_wordLength;
char g_word[CONST_WORD_LIMIT];
char g_charset[CONST_CHARSET_LIMIT];
char g_cracked[CONST_WORD_LIMIT];
int BLOCKS, THREADS, devices;
__device__ char g_deviceCharset[CONST_CHARSET_LIMIT], g_deviceCracked[CONST_WORD_LIMIT];
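// advance 'word' (a base-CONST_CHARSET_LENGTH counter of charset indices) by 'increment', growing the length as needed;
// returns false once CONST_WORD_LENGTH_MAX is exceeded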
__device__ __host__ bool next(uint8_t* length, char* word, uint32_t increment) {
uint32_t idx = 0;
uint32_t add = 0;
while (increment > 0 && idx < CONST_WORD_LIMIT) {
if (idx >= *length && increment > 0) {
increment--;
}
add = increment + word[idx];
word[idx] = add % CONST_CHARSET_LENGTH;
increment = add / CONST_CHARSET_LENGTH;
idx++;
}
if (idx > * length) {
*length = idx;
}
if (idx > CONST_WORD_LENGTH_MAX) {
return false;
}
return true;
}
__device__ __host__ bool compare(uint8 a[], uint8 b[], int len)
{
for (int i = 0; i < len; i++)
{
if (a[i] != b[i])
return false;
}
return true;
}
__global__ void md5Crack(uint8_t wordLength, char* charsetWord, uint32_t hash01, uint32_t hash02, uint32_t hash03, uint32_t hash04) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
char threadTextWord[CONST_WORD_LIMIT];
uint8_t threadWordLength;
uint32_t threadHash01, threadHash02, threadHash03, threadHash04;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < threadWordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
md5Hash((unsigned char*)threadTextWord, threadWordLength, &threadHash01, &threadHash02, &threadHash03, &threadHash04);
if (threadHash01 == hash01 && threadHash02 == hash02 && threadHash03 == hash03 && threadHash04 == hash04) {
memcpy(g_deviceCracked, threadTextWord, threadWordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void md5Crack2(uint8_t wordLength, char* charsetWord, uint8* origin) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
uint8 threadTextWord[CONST_WORD_LIMIT], md5sum[16];
uint8_t threadWordLength;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < threadWordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
md5new(threadTextWord, +wordLength, md5sum);
if (compare(md5sum, origin, 16)) {
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void sha1Crack(uint8_t wordLength, char* charsetWord, uint32_t hash01, uint32_t hash02, uint32_t hash03, uint32_t hash04, uint32_t hash05) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT], threadTextWord[CONST_WORD_LIMIT];
uint8_t threadWordLength;
uint32_t threadHash01, threadHash02, threadHash03, threadHash04, threadHash05;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < wordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
sha1((unsigned char*)threadTextWord, wordLength, &threadHash01, &threadHash02, &threadHash03, &threadHash04, &threadHash05);
if (threadHash01 == hash01 && threadHash02 == hash02 && threadHash03 == hash03 && threadHash04 == hash04 && threadHash05 == hash05) {
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void sha1Crack2(uint8_t wordLength, char* charsetWord, uint8* origin) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
uint8 threadTextWord[CONST_WORD_LIMIT], sha1sum[21];
uint8_t threadWordLength;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < wordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
//sha1((unsigned char*)threadTextWord, wordLength, &threadHash01, &threadHash02, &threadHash03, &threadHash04, &threadHash05);
sha1new(threadTextWord, +wordLength, sha1sum);
if (compare(sha1sum,origin,20)) {
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void sha256Crack(uint8_t wordLength, char* charsetWord, uint8* unhexed) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
uint8 threadTextWord[CONST_WORD_LIMIT], sha256sum[33];
uint8_t threadWordLength;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < wordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
sha256(threadTextWord, +wordLength, sha256sum);
if (compare(unhexed, sha256sum, 32)){
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
void string_to_hex(uint8* msg, size_t msg_sz, char* hex, size_t hex_sz)
{
memset(msg, '\0', msg_sz);
for (int i = 0; i < hex_sz; i += 2)
{
uint8_t msb = (hex[i + 0] <= '9' ? hex[i + 0] - '0' : (hex[i + 0] & 0x5F) - 'A' + 10);
uint8_t lsb = (hex[i + 1] <= '9' ? hex[i + 1] - '0' : (hex[i + 1] & 0x5F) - 'A' + 10);
msg[i / 2] = (msb << 4) | lsb;
}
}
int hash_length(char* hash) {
int count = 0;
for (int i = 0; hash[i] != '\0'; i++)
count++;
return count;
}
//int bcrypt_gensalt(int factor, char salt[BCRYPT_HASHSIZE])
//{
// int fd;
// char input[RANDBYTES];
// int workf;
// char* aux;
//
// // Note: Windows does not have /dev/urandom sadly.
//#ifdef _WIN32 || _WIN64
// HCRYPTPROV p;
// ULONG i;
//
// // Acquire a crypt context for generating random bytes.
// if (CryptAcquireContext(&p, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT) == FALSE) {
// return 1;
// }
//
// if (CryptGenRandom(p, RANDBYTES, (BYTE*)input) == FALSE) {
// return 2;
// }
//
// if (CryptReleaseContext(p, 0) == FALSE) {
// return 3;
// }
//#else
// // Get random bytes on Unix/Linux.
// fd = open("/dev/urandom", O_RDONLY);
// if (fd == -1)
// return 1;
//
// if (try_read(fd, input, RANDBYTES) != 0) {
// if (try_close(fd) != 0)
// return 4;
// return 2;
// }
//
// if (try_close(fd) != 0)
// return 3;
//#endif
//
// /* Generate salt. */
// workf = (factor < 4 || factor > 31) ? 12 : factor;
//
// aux = crypt_gensalt_rn("$2a$", workf, input, RANDBYTES,
// salt, BCRYPT_HASHSIZE);
// return (aux == NULL) ? 5 : 0;
//}
int gcd(int a, int b) {
return (a == 0) ? b : gcd(b % a, a);
}
void gpu_init() {
hipDeviceProp_t device_prop;
hipGetDeviceCount(&devices);
if (devices < 1) {
exit(EXIT_FAILURE);
}
if (hipGetDeviceProperties(&device_prop, 0) != hipSuccess) {
exit(EXIT_FAILURE);
}
int max_threads_per_mp = device_prop.maxThreadsPerMultiProcessor;
int block_size = (max_threads_per_mp / gcd(max_threads_per_mp, device_prop.maxThreadsPerBlock));
THREADS = max_threads_per_mp / block_size;
BLOCKS = block_size * device_prop.multiProcessorCount;
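// THREADS ends up as gcd(maxThreadsPerMultiProcessor, maxThreadsPerBlock); BLOCKS launches enough such blocks
// to fill every SM (e.g. 2048 and 1024 give 1024 threads and 2 blocks per SM)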
//int clock_speed = (int)(device_prop.memoryClockRate * 1000 * 1000);
}
static void test_blake2b_wo_key(char word[4])
{
uint32 test_block_size[] = { 16,32,64 };
uint32 INPUT_SIZE = 4;
uint8* KEY = NULL;
uint32 KEYLEN = 0;
uint32 BLAKE2B_BLOCK_SIZE = test_block_size[1];
// TEST BLAKE2B
/*BYTE blake2b_inp[INPUT_SIZE*1024];
BYTE blake2b_oup[1024*BLAKE2B_BLOCK_SIZE];*/
uint8* blake2b_inp = (uint8*)malloc(INPUT_SIZE * 1024 * sizeof(BYTE));
uint8* blake2b_oup = (uint8*)malloc(1024 * BLAKE2B_BLOCK_SIZE * sizeof(BYTE));
srand(0);
for (unsigned int i = 0; i < 1024 * INPUT_SIZE; i++)
{
blake2b_inp[i] = rand() % 256;
}
// CPU hash
for (int i = 0; i < 1024; i++)
{
keccaknew((unsigned char*)word, INPUT_SIZE, blake2b_oup + BLAKE2B_BLOCK_SIZE * i, BLAKE2B_BLOCK_SIZE << 3);
}
//BYTE blake2b_cu_oup[1024*BLAKE2B_BLOCK_SIZE];
BYTE* blake2b_cu_oup = (BYTE*)malloc(1024 * BLAKE2B_BLOCK_SIZE * sizeof(BYTE));
keccaknew((unsigned char*)word, INPUT_SIZE, blake2b_cu_oup, BLAKE2B_BLOCK_SIZE << 3);
if (memcmp(blake2b_oup, blake2b_cu_oup, BLAKE2B_BLOCK_SIZE * 1024) != 0)
{
printf("Failed test BLAKE2B no key, len %u \n", BLAKE2B_BLOCK_SIZE);
}
else
{
printf("Passed test BLAKE2B no key, len %u \n", BLAKE2B_BLOCK_SIZE);
}
free(blake2b_inp);
free(blake2b_oup);
free(blake2b_cu_oup);
}
char* phex(const void* p, size_t n)
{
const unsigned char* cp = (unsigned char*) p; /* Access as bytes. */
char* s = (char*) malloc(2 * n + 1); /* 2*n hex digits, plus NUL. */
size_t k;
/*
* Just in case - if allocation failed.
*/
if (s == NULL)
return s;
for (k = 0; k < n; ++k) {
/*
* Convert one byte of data into two hex-digit characters.
*/
sprintf(s + 2 * k, "%02X", cp[k]);
}
/*
* Terminate the string with a NUL character.
*/
s[2 * n] = '\0';
return s;
}
__device__ __host__ void makedigits(unsigned char x, unsigned char(&digits)[2])
{
unsigned char d0 = x / 16;
digits[1] = x - d0 * 16;
unsigned char d1 = d0 / 16;
digits[0] = d0 - d1 * 16;
}
__device__ __host__ void makehex(unsigned char(&digits)[2], char(&hex)[2])
{
for (int i = 0; i < 2; ++i) {
if (digits[i] < 10) {
hex[i] = '0' + digits[i];
}
else {
hex[i] = 'a' + (digits[i] - 10);
}
}
}
__device__ __host__ void hex_to_string(unsigned char* input, char* output, int size)
{
for (int i = 0; i < 16; ++i) {
unsigned char val = input[i];
unsigned char d[2];
char h[2];
makedigits(val, d);
makehex(d, h);
output[2 * i] = h[0];
output[2 * i + 1] = h[1];
}
}
int main(int argc, char* argv[]) {
/*char* w = new char[4];
w[0] = 'k';
w[1] = 'i';
w[2] = 's';
w[3] = 'a';
test_blake2b_wo_key(w);*/
char* hash;// = "e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98";
/* unsigned char* sha3hash = new unsigned char[64];
char* word = "kisa";
keccak(word, 4, sha3hash, 64);
for (int i = 0; i < 100; ++i)
std::cout << std::hex << (int)sha3hash[i];
std::cout << std::endl;
std::cout << "15: " << std::hex << 15;*/
/*char* testhash = "775a2dae52c3ef080726dd427f81bd482fdf96b5";
char* word = "zzzz";
unsigned char* sha1hash = new unsigned char[41];
sha1new((unsigned char*)word, 4, sha1hash);
uint8 sha1Unhexed[21];
hex_to_string(sha1Unhexed, 20, testhash, 40);
for (int i = 0; i < 20; i++)
{
if (sha1Unhexed[i] != sha1hash[i])
std::cout << "not equal" << std::endl;
}*/
char* testhash = "1c0d894f6f6ab511099a568f6e876c2f";
char* word = "kisa";
unsigned char* md5Hash = new unsigned char[32];
md5new((unsigned char*)word, 4, md5Hash);
uint8 md5unh[16];
string_to_hex(md5unh, 16, testhash, 32);
for (int i = 0; i < 16; i++)
{
if (md5unh[i] != md5Hash[i]);
//std::cout << "not equal" << std::endl;
}
char str[16][3]; // two hex digits per byte plus the terminating NUL written by sprintf
char* new_word = (char*) malloc(sizeof(char)*32);
for (int j = 0; j < 16; ++j) {
sprintf(str[j], "%02x", md5Hash[j]);
}
for(int i = 0; i < 16; i++)
std::cout << str[i][0] << str[i][1];
std::cout << std::endl;
unsigned char* d_md5Hash;
hipMalloc(&d_md5Hash, 32 * sizeof(unsigned char));
hipMemcpy(d_md5Hash, &md5Hash[0], 32 * sizeof(unsigned char),
hipMemcpyHostToDevice);
char* d_str1;
hipMalloc(&d_str1, 32 * sizeof(char));
char str1[32];
//kernel<<<1,1>>>(d_md5Hash, d_str1);
hipMemcpy(&str1[0], d_str1, 32 * sizeof(char), hipMemcpyDeviceToHost);
for (int i = 0; i < 32; i += 2) std::cout << str1[i] << str1[i + 1];
std::cout << std::endl;
/*bool next_ = true;
int k = 0;
int m = 0;
while (next_) {
new_word[k] = str[m][0];
new_word[k + 1] = str[m][1];
k += 2;
m++;
if (k >= 32)
next_ = false;
}
*/
for (int i = 0; i < 16; i++) {
new_word[2 * i] = str[i][0];
new_word[2 * i + 1] = str[i][1];
}
for (int i = 0; i < 32; i++)
std::cout << new_word[i];
std::cout << std::endl;
unsigned char* md5Hash2 = new unsigned char[32];
md5new((unsigned char*)new_word, 32, md5Hash2);
for (int i = 0; i < 16; i++)
{
if (md5unh[i] != md5Hash2[i]);
//std::cout << "not equal" << std::endl;
}
char str2[16][3]; // two hex digits per byte plus the terminating NUL written by sprintf
for (int j = 0; j < 16; ++j) {
//printf("%02x", md5Hashh[j]);
sprintf(str2[j], "%02x", md5Hash2[j]);
std::cout << str2[j];
}
/* Check arguments */
if (argc != 2) {
std::cout << "Need hash password. Now arguments count: " << argc << std::endl;
return -1;
}
else {
hash = argv[1];
std::cout << "Set hash [" << hash << "]" << std::endl;
}
int hash_size = hash_length(hash);
gpu_init();
hipGetDeviceCount(&devices);
/* Sync type */
ERROR_CHECK(hipSetDeviceFlags(hipDeviceScheduleSpin));
/* Display amount of devices */
std::cout << "|**********************/" << std::endl;
std::cout << "| " << devices << " device(s) found" << std::endl;
std::cout << "| " << BLOCKS << " blocks found" << std::endl;
std::cout << "| " << THREADS << " threads found" << std::endl;
std::cout << "|**********************/" << std::endl;
//uint32_t md5Hash[4];
uint32_t sha1Hash[5];
uint8 sha256Unhexed[33];
uint8* unh;
uint8 sha1Unh[21];
uint8* sha1_;
uint8 md5Unh[16];
uint8* md5_;
switch (hash_size) {
case 32:
/* Parse argument (md5) */
std::cout << "It's a MD5" << std::endl;
for (uint8_t i = 0; i < 4; i++) {
char tmp[16];
strncpy(tmp, hash + i * 8, 8);
sscanf(tmp, "%x", &md5Hash[i]);
md5Hash[i] = (md5Hash[i] & 0xFF000000) >> 24 | (md5Hash[i] & 0x00FF0000) >> 8 | (md5Hash[i] & 0x0000FF00) << 8 | (md5Hash[i] & 0x000000FF) << 24;
}
memset(md5Unh, 0, 16);
string_to_hex(md5Unh, 16, hash, 32);
hipMalloc((char**)&md5_, sizeof(char) * 16);
hipMemcpy(md5_, md5Unh, sizeof(char) * 16, hipMemcpyHostToDevice);
break;
case 40:
/* Parse argument (sha1) */
std::cout << "It's a SHA1" << std::endl;
/*char tmp[40];
for (int i = 0; i < 5; i++)
{
for (int j = 0; j < 8; j++)
tmp[j] = hash[i * 8 + j];
sha1Hash[i] = (uint32_t)strtoll(tmp, NULL, 16);
}*/
memset(sha1Unh, 0, 21);
string_to_hex(sha1Unh, 20, hash, 40);
hipMalloc((char**)&sha1_, sizeof(char) * 20);
hipMemcpy(sha1_, sha1Unh, sizeof(char) * 20, hipMemcpyHostToDevice);
break;
case 64:
/* Parse argument (sha256) */
std::cout << "It's a SHA256" << std::endl;
memset(sha256Unhexed, 0, 33);
string_to_hex(sha256Unhexed, 32, hash, 64);
hipMalloc((char**)&unh, sizeof(char) * 32);
hipMemcpy(unh, sha256Unhexed, sizeof(char) * 32, hipMemcpyHostToDevice);
break;
default:
std::cout << "Wrong hash length" << std::endl;
return -1;
}
/* Fill memory */
memset(g_word, 0, CONST_WORD_LIMIT);
memset(g_cracked, 0, CONST_WORD_LIMIT);
memcpy(g_charset, CONST_CHARSET, CONST_CHARSET_LENGTH);
/* Current word length = minimum word length */
g_wordLength = CONST_WORD_LENGTH_MIN;
/* Main device */
hipSetDevice(0);
/* Timers */
hipEvent_t clockBegin;
hipEvent_t clockLast;
hipEventCreate(&clockBegin, hipEventDefault);
hipEventCreate(&clockLast, hipEventDefault);
hipEventRecord(clockBegin, 0);
/* Current word is different on each device */
char** words = new char* [devices];
for (int device = 0; device < devices; device++) {
hipSetDevice(device);
/* Copy to each device */
ERROR_CHECK(hipMemcpyToSymbol(g_deviceCharset, g_charset, sizeof(uint8_t) * CONST_CHARSET_LIMIT, 0, hipMemcpyHostToDevice));
ERROR_CHECK(hipMemcpyToSymbol(g_deviceCracked, g_cracked, sizeof(uint8_t) * CONST_WORD_LIMIT, 0, hipMemcpyHostToDevice));
/* Allocate on each device */
ERROR_CHECK(hipMalloc((void**)&words[device], sizeof(uint8_t) * CONST_WORD_LIMIT));
}
int later = 0;
while (true) {
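// one pass per iteration: for each device, copy the current word, hash BLOCKS*THREADS candidates from it, then advance g_word by BLOCKS*THREADS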
bool result = false;
bool found = false;
for (int device = 0; device < devices; device++) {
hipSetDevice(device);
/* Copy current data */
ERROR_CHECK(hipMemcpy(words[device], g_word, sizeof(uint8_t) * CONST_WORD_LIMIT, hipMemcpyHostToDevice));
/* Start kernel */
switch (hash_size) {
case 32:
//md5Crack <<< BLOCKS, THREADS >>> (g_wordLength, words[device], md5Hash[0], md5Hash[1],
// md5Hash[2], md5Hash[3]);
md5Crack2 << < BLOCKS, THREADS >> > (g_wordLength, words[device], md5_);
break;
case 40:
//sha1Crack <<< BLOCKS, THREADS >>> (g_wordLength, words[device], sha1Hash[0], sha1Hash[1],
// sha1Hash[2], sha1Hash[3], sha1Hash[4]);
sha1Crack2 << <BLOCKS, THREADS >> > (g_wordLength, words[device], sha1_);
break;
case 64:
hipLaunchKernelGGL(( sha256Crack) , dim3(BLOCKS), dim3(THREADS) , 0, 0, g_wordLength, words[device], unh);
break;
default:
std::cout << "Error when start __global__";
break;
}
/* Global increment */
result = next(&g_wordLength, g_word, BLOCKS * THREADS);
}
///* Display progress */
//char word[CONST_WORD_LIMIT];
//for (int i = 0; i < g_wordLength; i++) {
// word[i] = g_charset[g_word[i]];
//}
if (later != (uint32_t)g_wordLength) {
std::cout << "(" << (uint32_t)g_wordLength << ")" << std::endl;
later = (uint32_t)g_wordLength;
}
//std::cout << "currently at " << std::string(word, g_wordLength) << " (" << (uint32_t)g_wordLength << ")" << std::endl;
for (int device = 0; device < devices; device++) {
hipSetDevice(device);
hipDeviceSynchronize();
ERROR_CHECK(hipMemcpyFromSymbol(g_cracked, g_deviceCracked, sizeof(uint8_t) * CONST_WORD_LIMIT));
if (found = *g_cracked != 0) {
std::cout << "cracked " << g_cracked << std::endl;
break;
}
}
if (!result || found) {
if (!result && !found) {
std::cout << "found nothing (host)" << std::endl;
}
break;
}
}
for (int device = 0; device < devices; device++) {
hipSetDevice(device);
hipFree((void**)words[device]);
}
delete[] words;
hipSetDevice(0);
float milliseconds = 0;
hipEventRecord(clockLast, 0);
hipEventSynchronize(clockLast);
hipEventElapsedTime(&milliseconds, clockBegin, clockLast);
std::cout << "Computation time " << milliseconds << " ms" << std::endl;
hipEventDestroy(clockBegin);
hipEventDestroy(clockLast);
}
| 554c236a9ed2986e7f055a79cb4938a4abe79487.cu | #include <string.h>
#include <stdio.h>
#include <iostream>
//#include <time.h>
//#include <string.h>
//#include <windows.h>
//#include <wincrypt.h> /* CryptAcquireContext, CryptGenRandom */
#include <cuda_runtime.h>
//#include <cuda_runtime_api.h>
//#include <curand_kernel.h>
#include <device_functions.h>
#include "device_launch_parameters.h"
#define uint8 unsigned char
#define CONST_WORD_LIMIT 10
#define CONST_CHARSET_LIMIT 100
#define CONST_CHARSET "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
#define CONST_CHARSET_LENGTH (sizeof(CONST_CHARSET) - 1)
#define CONST_WORD_LENGTH_MIN 1
#define CONST_WORD_LENGTH_MAX 8
//#define BCRYPT_HASHSIZE 60
//#define RANDBYTES (16)
#include "assert.cu"
#include "md5.cu"
#include "sha1.cu"
#include "sha256.cu"
#include "keccak.cu"
#include "sha1new.cu"
#include "md5new.cu"
#include "keccakv2.cu"
/* Global variables */
uint8_t g_wordLength;
char g_word[CONST_WORD_LIMIT];
char g_charset[CONST_CHARSET_LIMIT];
char g_cracked[CONST_WORD_LIMIT];
int BLOCKS, THREADS, devices;
__device__ char g_deviceCharset[CONST_CHARSET_LIMIT], g_deviceCracked[CONST_WORD_LIMIT];
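// advance 'word' (a base-CONST_CHARSET_LENGTH counter of charset indices) by 'increment', growing the length as needed;
// returns false once CONST_WORD_LENGTH_MAX is exceeded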
__device__ __host__ bool next(uint8_t* length, char* word, uint32_t increment) {
uint32_t idx = 0;
uint32_t add = 0;
while (increment > 0 && idx < CONST_WORD_LIMIT) {
if (idx >= *length && increment > 0) {
increment--;
}
add = increment + word[idx];
word[idx] = add % CONST_CHARSET_LENGTH;
increment = add / CONST_CHARSET_LENGTH;
idx++;
}
if (idx > * length) {
*length = idx;
}
if (idx > CONST_WORD_LENGTH_MAX) {
return false;
}
return true;
}
__device__ __host__ bool compare(uint8 a[], uint8 b[], int len)
{
for (int i = 0; i < len; i++)
{
if (a[i] != b[i])
return false;
}
return true;
}
__global__ void md5Crack(uint8_t wordLength, char* charsetWord, uint32_t hash01, uint32_t hash02, uint32_t hash03, uint32_t hash04) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
char threadTextWord[CONST_WORD_LIMIT];
uint8_t threadWordLength;
uint32_t threadHash01, threadHash02, threadHash03, threadHash04;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < threadWordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
md5Hash((unsigned char*)threadTextWord, threadWordLength, &threadHash01, &threadHash02, &threadHash03, &threadHash04);
if (threadHash01 == hash01 && threadHash02 == hash02 && threadHash03 == hash03 && threadHash04 == hash04) {
memcpy(g_deviceCracked, threadTextWord, threadWordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void md5Crack2(uint8_t wordLength, char* charsetWord, uint8* origin) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
uint8 threadTextWord[CONST_WORD_LIMIT], md5sum[16];
uint8_t threadWordLength;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < threadWordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
md5new(threadTextWord, +wordLength, md5sum);
if (compare(md5sum, origin, 16)) {
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void sha1Crack(uint8_t wordLength, char* charsetWord, uint32_t hash01, uint32_t hash02, uint32_t hash03, uint32_t hash04, uint32_t hash05) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT], threadTextWord[CONST_WORD_LIMIT];
uint8_t threadWordLength;
uint32_t threadHash01, threadHash02, threadHash03, threadHash04, threadHash05;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < wordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
sha1((unsigned char*)threadTextWord, wordLength, &threadHash01, &threadHash02, &threadHash03, &threadHash04, &threadHash05);
if (threadHash01 == hash01 && threadHash02 == hash02 && threadHash03 == hash03 && threadHash04 == hash04 && threadHash05 == hash05) {
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void sha1Crack2(uint8_t wordLength, char* charsetWord, uint8* origin) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
uint8 threadTextWord[CONST_WORD_LIMIT], sha1sum[21];
uint8_t threadWordLength;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < wordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
//sha1((unsigned char*)threadTextWord, wordLength, &threadHash01, &threadHash02, &threadHash03, &threadHash04, &threadHash05);
sha1new(threadTextWord, +wordLength, sha1sum);
if (compare(sha1sum,origin,20)) {
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
__global__ void sha256Crack(uint8_t wordLength, char* charsetWord, uint8* unhexed) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
__shared__ char sharedCharset[CONST_CHARSET_LIMIT];
char threadCharsetWord[CONST_WORD_LIMIT];
uint8 threadTextWord[CONST_WORD_LIMIT], sha256sum[33];
uint8_t threadWordLength;
memcpy(threadCharsetWord, charsetWord, CONST_WORD_LIMIT);
memcpy(&threadWordLength, &wordLength, sizeof(uint8_t));
memcpy(sharedCharset, g_deviceCharset, sizeof(uint8_t) * CONST_CHARSET_LIMIT);
next(&threadWordLength, threadCharsetWord, idx);
for (uint32_t i = 0; i < wordLength; i++) {
threadTextWord[i] = sharedCharset[threadCharsetWord[i]];
}
sha256(threadTextWord, +wordLength, sha256sum);
if (compare(unhexed, sha256sum, 32)){
memcpy(g_deviceCracked, threadTextWord, wordLength);
}
if (!next(&threadWordLength, threadCharsetWord, 1)) {
return;
}
}
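// parse an ASCII hex string into raw bytes, two hex digits per byte (e.g. "1c0d" -> 0x1c, 0x0d)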
void string_to_hex(uint8* msg, size_t msg_sz, char* hex, size_t hex_sz)
{
memset(msg, '\0', msg_sz);
for (int i = 0; i < hex_sz; i += 2)
{
uint8_t msb = (hex[i + 0] <= '9' ? hex[i + 0] - '0' : (hex[i + 0] & 0x5F) - 'A' + 10);
uint8_t lsb = (hex[i + 1] <= '9' ? hex[i + 1] - '0' : (hex[i + 1] & 0x5F) - 'A' + 10);
msg[i / 2] = (msb << 4) | lsb;
}
}
int hash_length(char* hash) {
int count = 0;
for (int i = 0; hash[i] != '\0'; i++)
count++;
return count;
}
//int bcrypt_gensalt(int factor, char salt[BCRYPT_HASHSIZE])
//{
// int fd;
// char input[RANDBYTES];
// int workf;
// char* aux;
//
// // Note: Windows does not have /dev/urandom sadly.
//#ifdef _WIN32 || _WIN64
// HCRYPTPROV p;
// ULONG i;
//
// // Acquire a crypt context for generating random bytes.
// if (CryptAcquireContext(&p, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT) == FALSE) {
// return 1;
// }
//
// if (CryptGenRandom(p, RANDBYTES, (BYTE*)input) == FALSE) {
// return 2;
// }
//
// if (CryptReleaseContext(p, 0) == FALSE) {
// return 3;
// }
//#else
// // Get random bytes on Unix/Linux.
// fd = open("/dev/urandom", O_RDONLY);
// if (fd == -1)
// return 1;
//
// if (try_read(fd, input, RANDBYTES) != 0) {
// if (try_close(fd) != 0)
// return 4;
// return 2;
// }
//
// if (try_close(fd) != 0)
// return 3;
//#endif
//
// /* Generate salt. */
// workf = (factor < 4 || factor > 31) ? 12 : factor;
//
// aux = crypt_gensalt_rn("$2a$", workf, input, RANDBYTES,
// salt, BCRYPT_HASHSIZE);
// return (aux == NULL) ? 5 : 0;
//}
int gcd(int a, int b) {
return (a == 0) ? b : gcd(b % a, a);
}
void gpu_init() {
cudaDeviceProp device_prop;
cudaGetDeviceCount(&devices);
if (devices < 1) {
exit(EXIT_FAILURE);
}
if (cudaGetDeviceProperties(&device_prop, 0) != cudaSuccess) {
exit(EXIT_FAILURE);
}
int max_threads_per_mp = device_prop.maxThreadsPerMultiProcessor;
int block_size = (max_threads_per_mp / gcd(max_threads_per_mp, device_prop.maxThreadsPerBlock));
THREADS = max_threads_per_mp / block_size;
BLOCKS = block_size * device_prop.multiProcessorCount;
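// THREADS ends up as gcd(maxThreadsPerMultiProcessor, maxThreadsPerBlock); BLOCKS launches enough such blocks
// to fill every SM (e.g. 2048 and 1024 give 1024 threads and 2 blocks per SM)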
//int clock_speed = (int)(device_prop.memoryClockRate * 1000 * 1000);
}
static void test_blake2b_wo_key(char word[4])
{
uint32 test_block_size[] = { 16,32,64 };
uint32 INPUT_SIZE = 4;
uint8* KEY = NULL;
uint32 KEYLEN = 0;
uint32 BLAKE2B_BLOCK_SIZE = test_block_size[1];
// TEST BLAKE2B
/*BYTE blake2b_inp[INPUT_SIZE*1024];
BYTE blake2b_oup[1024*BLAKE2B_BLOCK_SIZE];*/
uint8* blake2b_inp = (uint8*)malloc(INPUT_SIZE * 1024 * sizeof(BYTE));
uint8* blake2b_oup = (uint8*)malloc(1024 * BLAKE2B_BLOCK_SIZE * sizeof(BYTE));
srand(0);
for (unsigned int i = 0; i < 1024 * INPUT_SIZE; i++)
{
blake2b_inp[i] = rand() % 256;
}
// CPU hash
for (int i = 0; i < 1024; i++)
{
keccaknew((unsigned char*)word, INPUT_SIZE, blake2b_oup + BLAKE2B_BLOCK_SIZE * i, BLAKE2B_BLOCK_SIZE << 3);
}
//BYTE blake2b_cu_oup[1024*BLAKE2B_BLOCK_SIZE];
BYTE* blake2b_cu_oup = (BYTE*)malloc(1024 * BLAKE2B_BLOCK_SIZE * sizeof(BYTE));
keccaknew((unsigned char*)word, INPUT_SIZE, blake2b_cu_oup, BLAKE2B_BLOCK_SIZE << 3);
if (memcmp(blake2b_oup, blake2b_cu_oup, BLAKE2B_BLOCK_SIZE * 1024) != 0)
{
printf("Failed test BLAKE2B no key, len %u \n", BLAKE2B_BLOCK_SIZE);
}
else
{
printf("Passed test BLAKE2B no key, len %u \n", BLAKE2B_BLOCK_SIZE);
}
free(blake2b_inp);
free(blake2b_oup);
free(blake2b_cu_oup);
}
char* phex(const void* p, size_t n)
{
const unsigned char* cp = (unsigned char*) p; /* Access as bytes. */
char* s = (char*) malloc(2 * n + 1); /* 2*n hex digits, plus NUL. */
size_t k;
/*
* Just in case - if allocation failed.
*/
if (s == NULL)
return s;
for (k = 0; k < n; ++k) {
/*
* Convert one byte of data into two hex-digit characters.
*/
sprintf(s + 2 * k, "%02X", cp[k]);
}
/*
* Terminate the string with a NUL character.
*/
s[2 * n] = '\0';
return s;
}
__device__ __host__ void makedigits(unsigned char x, unsigned char(&digits)[2])
{
unsigned char d0 = x / 16;
digits[1] = x - d0 * 16;
unsigned char d1 = d0 / 16;
digits[0] = d0 - d1 * 16;
}
__device__ __host__ void makehex(unsigned char(&digits)[2], char(&hex)[2])
{
for (int i = 0; i < 2; ++i) {
if (digits[i] < 10) {
hex[i] = '0' + digits[i];
}
else {
hex[i] = 'a' + (digits[i] - 10);
}
}
}
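// formats raw bytes as lowercase hex, two characters per byte; the loop is hard-coded to 16 bytes and does not use 'size'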
__device__ __host__ void hex_to_string(unsigned char* input, char* output, int size)
{
for (int i = 0; i < 16; ++i) {
unsigned char val = input[i];
unsigned char d[2];
char h[2];
makedigits(val, d);
makehex(d, h);
output[2 * i] = h[0];
output[2 * i + 1] = h[1];
}
}
int main(int argc, char* argv[]) {
/*char* w = new char[4];
w[0] = 'k';
w[1] = 'i';
w[2] = 's';
w[3] = 'a';
test_blake2b_wo_key(w);*/
char* hash;// = "e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98";
/* unsigned char* sha3hash = new unsigned char[64];
char* word = "kisa";
keccak(word, 4, sha3hash, 64);
for (int i = 0; i < 100; ++i)
std::cout << std::hex << (int)sha3hash[i];
std::cout << std::endl;
std::cout << "15: " << std::hex << 15;*/
/*char* testhash = "775a2dae52c3ef080726dd427f81bd482fdf96b5";
char* word = "zzzz";
unsigned char* sha1hash = new unsigned char[41];
sha1new((unsigned char*)word, 4, sha1hash);
uint8 sha1Unhexed[21];
hex_to_string(sha1Unhexed, 20, testhash, 40);
for (int i = 0; i < 20; i++)
{
if (sha1Unhexed[i] != sha1hash[i])
std::cout << "not equal" << std::endl;
}*/
char* testhash = "1c0d894f6f6ab511099a568f6e876c2f";
char* word = "kisa";
unsigned char* md5Hash = new unsigned char[32];
md5new((unsigned char*)word, 4, md5Hash);
uint8 md5unh[16];
string_to_hex(md5unh, 16, testhash, 32);
for (int i = 0; i < 16; i++)
{
if (md5unh[i] != md5Hash[i]);
//std::cout << "not equal" << std::endl;
}
char str[16][3]; // two hex digits per byte plus the terminating NUL written by sprintf
char* new_word = (char*) malloc(sizeof(char)*32);
for (int j = 0; j < 16; ++j) {
sprintf(str[j], "%02x", md5Hash[j]);
}
for(int i = 0; i < 16; i++)
std::cout << str[i][0] << str[i][1];
std::cout << std::endl;
unsigned char* d_md5Hash;
cudaMalloc(&d_md5Hash, 32 * sizeof(unsigned char));
cudaMemcpy(d_md5Hash, &md5Hash[0], 32 * sizeof(unsigned char),
cudaMemcpyHostToDevice);
char* d_str1;
cudaMalloc(&d_str1, 32 * sizeof(char));
char str1[32];
//kernel<<<1,1>>>(d_md5Hash, d_str1);
cudaMemcpy(&str1[0], d_str1, 32 * sizeof(char), cudaMemcpyDeviceToHost);
for (int i = 0; i < 32; i += 2) std::cout << str1[i] << str1[i + 1];
std::cout << std::endl;
/*bool next_ = true;
int k = 0;
int m = 0;
while (next_) {
new_word[k] = str[m][0];
new_word[k + 1] = str[m][1];
k += 2;
m++;
if (k >= 32)
next_ = false;
}
*/
for (int i = 0; i < 16; i++) {
new_word[2 * i] = str[i][0];
new_word[2 * i + 1] = str[i][1];
}
for (int i = 0; i < 32; i++)
std::cout << new_word[i];
std::cout << std::endl;
unsigned char* md5Hash2 = new unsigned char[32];
md5new((unsigned char*)new_word, 32, md5Hash2);
for (int i = 0; i < 16; i++)
{
if (md5unh[i] != md5Hash2[i]);
//std::cout << "not equal" << std::endl;
}
char str2[16][3]; // two hex digits per byte plus the terminating NUL written by sprintf
for (int j = 0; j < 16; ++j) {
//printf("%02x", md5Hashh[j]);
sprintf(str2[j], "%02x", md5Hash2[j]);
std::cout << str2[j];
}
/* Check arguments */
if (argc != 2) {
std::cout << "Need hash password. Now arguments count: " << argc << std::endl;
return -1;
}
else {
hash = argv[1];
std::cout << "Set hash [" << hash << "]" << std::endl;
}
int hash_size = hash_length(hash);
gpu_init();
cudaGetDeviceCount(&devices);
/* Sync type */
ERROR_CHECK(cudaSetDeviceFlags(cudaDeviceScheduleSpin));
/* Display amount of devices */
std::cout << "|**********************/" << std::endl;
std::cout << "| " << devices << " device(s) found" << std::endl;
std::cout << "| " << BLOCKS << " blocks found" << std::endl;
std::cout << "| " << THREADS << " threads found" << std::endl;
std::cout << "|**********************/" << std::endl;
//uint32_t md5Hash[4];
uint32_t sha1Hash[5];
uint8 sha256Unhexed[33];
uint8* unh;
uint8 sha1Unh[21];
uint8* sha1_;
uint8 md5Unh[16];
uint8* md5_;
switch (hash_size) {
case 32:
/* Parse argument (md5) */
std::cout << "It's a MD5" << std::endl;
for (uint8_t i = 0; i < 4; i++) {
char tmp[16];
strncpy(tmp, hash + i * 8, 8);
sscanf(tmp, "%x", &md5Hash[i]);
md5Hash[i] = (md5Hash[i] & 0xFF000000) >> 24 | (md5Hash[i] & 0x00FF0000) >> 8 | (md5Hash[i] & 0x0000FF00) << 8 | (md5Hash[i] & 0x000000FF) << 24;
}
memset(md5Unh, 0, 16);
string_to_hex(md5Unh, 16, hash, 32);
cudaMalloc((char**)&md5_, sizeof(char) * 16);
cudaMemcpy(md5_, md5Unh, sizeof(char) * 16, cudaMemcpyHostToDevice);
break;
case 40:
/* Parse argument (sha1) */
std::cout << "It's a SHA1" << std::endl;
/*char tmp[40];
for (int i = 0; i < 5; i++)
{
for (int j = 0; j < 8; j++)
tmp[j] = hash[i * 8 + j];
sha1Hash[i] = (uint32_t)strtoll(tmp, NULL, 16);
}*/
memset(sha1Unh, 0, 21);
string_to_hex(sha1Unh, 20, hash, 40);
cudaMalloc((char**)&sha1_, sizeof(char) * 20);
cudaMemcpy(sha1_, sha1Unh, sizeof(char) * 20, cudaMemcpyHostToDevice);
break;
case 64:
/* Parse argument (sha256) */
std::cout << "It's a SHA256" << std::endl;
memset(sha256Unhexed, 0, 33);
string_to_hex(sha256Unhexed, 32, hash, 64);
cudaMalloc((char**)&unh, sizeof(char) * 32);
cudaMemcpy(unh, sha256Unhexed, sizeof(char) * 32, cudaMemcpyHostToDevice);
break;
default:
std::cout << "Wrong hash length" << std::endl;
return -1;
}
/* Fill memory */
memset(g_word, 0, CONST_WORD_LIMIT);
memset(g_cracked, 0, CONST_WORD_LIMIT);
memcpy(g_charset, CONST_CHARSET, CONST_CHARSET_LENGTH);
/* Current word length = minimum word length */
g_wordLength = CONST_WORD_LENGTH_MIN;
/* Main device */
cudaSetDevice(0);
/* Timers */
cudaEvent_t clockBegin;
cudaEvent_t clockLast;
cudaEventCreate(&clockBegin, cudaEventDefault);
cudaEventCreate(&clockLast, cudaEventDefault);
cudaEventRecord(clockBegin, 0);
/* Current word is different on each device */
char** words = new char* [devices];
for (int device = 0; device < devices; device++) {
cudaSetDevice(device);
/* Copy to each device */
ERROR_CHECK(cudaMemcpyToSymbol(g_deviceCharset, g_charset, sizeof(uint8_t) * CONST_CHARSET_LIMIT, 0, cudaMemcpyHostToDevice));
ERROR_CHECK(cudaMemcpyToSymbol(g_deviceCracked, g_cracked, sizeof(uint8_t) * CONST_WORD_LIMIT, 0, cudaMemcpyHostToDevice));
/* Allocate on each device */
ERROR_CHECK(cudaMalloc((void**)&words[device], sizeof(uint8_t) * CONST_WORD_LIMIT));
}
int later = 0;
while (true) {
bool result = false;
bool found = false;
for (int device = 0; device < devices; device++) {
cudaSetDevice(device);
/* Copy current data */
ERROR_CHECK(cudaMemcpy(words[device], g_word, sizeof(uint8_t) * CONST_WORD_LIMIT, cudaMemcpyHostToDevice));
/* Start kernel */
switch (hash_size) {
case 32:
//md5Crack <<< BLOCKS, THREADS >>> (g_wordLength, words[device], md5Hash[0], md5Hash[1],
// md5Hash[2], md5Hash[3]);
md5Crack2 <<< BLOCKS, THREADS >>> (g_wordLength, words[device], md5_);
break;
case 40:
//sha1Crack <<< BLOCKS, THREADS >>> (g_wordLength, words[device], sha1Hash[0], sha1Hash[1],
// sha1Hash[2], sha1Hash[3], sha1Hash[4]);
sha1Crack2 <<< BLOCKS, THREADS >>> (g_wordLength, words[device], sha1_);
break;
case 64:
sha256Crack <<< BLOCKS, THREADS >>> (g_wordLength, words[device], unh);
break;
default:
std::cout << "Error when start __global__";
break;
}
/* Global increment */
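/* next() advances the starting candidate by BLOCKS * THREADS positions
   (presumably one candidate per launched thread), so successive launches and
   devices cover disjoint chunks of the keyspace; it returns false once the
   maximum word length has been exhausted (handled by the "found nothing"
   branch below). */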
result = next(&g_wordLength, g_word, BLOCKS * THREADS);
}
///* Display progress */
//char word[CONST_WORD_LIMIT];
//for (int i = 0; i < g_wordLength; i++) {
// word[i] = g_charset[g_word[i]];
//}
if (later != (uint32_t)g_wordLength) {
std::cout << "(" << (uint32_t)g_wordLength << ")" << std::endl;
later = (uint32_t)g_wordLength;
}
//std::cout << "currently at " << std::string(word, g_wordLength) << " (" << (uint32_t)g_wordLength << ")" << std::endl;
for (int device = 0; device < devices; device++) {
cudaSetDevice(device);
cudaDeviceSynchronize();
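/* g_deviceCracked is a device symbol that a matching kernel thread appears to
   fill with the recovered word; a non-empty copy back into g_cracked therefore
   signals success. */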
ERROR_CHECK(cudaMemcpyFromSymbol(g_cracked, g_deviceCracked, sizeof(uint8_t) * CONST_WORD_LIMIT));
found = (*g_cracked != 0);
if (found) {
std::cout << "cracked " << g_cracked << std::endl;
break;
}
}
if (!result || found) {
if (!result && !found) {
std::cout << "found nothing (host)" << std::endl;
}
break;
}
}
for (int device = 0; device < devices; device++) {
cudaSetDevice(device);
cudaFree(words[device]);
}
delete[] words;
cudaSetDevice(0);
float milliseconds = 0;
cudaEventRecord(clockLast, 0);
cudaEventSynchronize(clockLast);
cudaEventElapsedTime(&milliseconds, clockBegin, clockLast);
std::cout << "Computation time " << milliseconds << " ms" << std::endl;
cudaEventDestroy(clockBegin);
cudaEventDestroy(clockLast);
}
|
9c1e08e7fa5c4dbcb0d870e10db32fdb6f779909.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//============================================================================
// UPDATE
//============================================================================
// 14 APR 2011 Lukasz G. Szafaryn
// 2014-2018 Caio Lunardi
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdbool.h> // (in path known to compiler) needed by true/false
#include <omp.h>
// Helper functions
#include "helper_cuda.h"
#include "helper_string.h"
#ifdef LOGS
#include "log_helper.h"
#endif
#ifdef SAFE_MALLOC
#include "safe_memory/safe_memory.h"
#endif
//=============================================================================
// DEFINE / INCLUDE
//=============================================================================
#define NUMBER_PAR_PER_BOX 192 // keep this low to allow more blocks that share shared memory to run concurrently, code does not work for larger than 110, more speedup can be achieved with larger number and no shared memory used
#define NUMBER_THREADS 192 // this should be roughly equal to NUMBER_PAR_PER_BOX for best performance
// STABLE
#define DOT(A,B) ((A.x)*(B.x)+(A.y)*(B.y)+(A.z)*(B.z))
//=============================================================================
// STRUCTURES
//=============================================================================
typedef struct
{
double x, y, z;
} THREE_VECTOR;
typedef struct
{
double v, x, y, z;
} FOUR_VECTOR;
typedef struct nei_str
{
// neighbor box
int x, y, z;
int number;
long offset;
} nei_str;
typedef struct box_str
{
// home box
int x, y, z;
int number;
long offset;
// neighbor boxes
int nn;
nei_str nei[26];
} box_str;
typedef struct par_str
{
double alpha;
} par_str;
typedef struct dim_str
{
// input arguments
int cur_arg;
int arch_arg;
int cores_arg;
int boxes1d_arg;
// system memory
long number_boxes;
long box_mem;
long space_elem;
long space_mem;
long space_mem2;
} dim_str;
void usage(int argc, char** argv) {
printf("Usage: %s -boxes=N [-generate] [-input_distances=<path>] [-input_charges=<path>] [-output_gold=<path>] [-iterations=N] [-streams=N] [-debug] [-verbose]\n", argv[0]);
}
void getParams(int argc, char** argv, int *boxes, int *generate, char **input_distances, char **input_charges, char **output_gold, int *iterations, int *verbose, int *fault_injection, int *nstreams)
{
if (argc<2) {
usage(argc, argv);
exit(EXIT_FAILURE);
}
*generate = 0;
*iterations = 1000000;
*nstreams = 1;
*fault_injection = 0;
*verbose = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "boxes"))
{
*boxes = getCmdLineArgumentInt(argc, (const char **)argv, "boxes");
if (*boxes <= 0)
{
printf("Invalid input size given on the command-line: %d\n", *boxes);
exit(EXIT_FAILURE);
}
}
else
{
usage(argc, argv);
exit(EXIT_FAILURE);
}
if (checkCmdLineFlag(argc, (const char **)argv, "generate"))
{
*generate = 1;
printf(">> Output will be written to file. Only stream #0 output will be considered.\n");
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_distances"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_distances", input_distances);
}
else
{
*input_distances = new char[100];
snprintf(*input_distances, 100, "dlava_distances_%i", *boxes);
printf("Using default input_distances path: %s\n", *input_distances);
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_charges"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_charges", input_charges);
}
else
{
*input_charges = new char[100];
snprintf(*input_charges, 100, "dlava_charges_%i", *boxes);
printf("Using default input_charges path: %s\n", *input_charges);
}
if (checkCmdLineFlag(argc, (const char **)argv, "output_gold"))
{
getCmdLineArgumentString(argc, (const char **)argv, "output_gold", output_gold);
}
else
{
*output_gold = new char[100];
snprintf(*output_gold, 100, "dlava_gold_%i", *boxes);
printf("Using default output_gold path: %s\n", *output_gold);
}
if (checkCmdLineFlag(argc, (const char **)argv, "iterations"))
{
*iterations = getCmdLineArgumentInt(argc, (const char **)argv, "iterations");
}
if (checkCmdLineFlag(argc, (const char **)argv, "streams"))
{
*nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "streams");
}
if (checkCmdLineFlag(argc, (const char **)argv, "verbose"))
{
*verbose = 1;
}
if (checkCmdLineFlag(argc, (const char **)argv, "debug"))
{
*fault_injection = 1;
printf("!! Will be injected an input error\n");
}
}
//-----------------------------------------------------------------------------
// plasmaKernel_gpu_2
//-----------------------------------------------------------------------------
__global__ void kernel_gpu_cuda(par_str d_par_gpu, dim_str d_dim_gpu, box_str* d_box_gpu, FOUR_VECTOR* d_rv_gpu, double* d_qv_gpu, FOUR_VECTOR* d_fv_gpu) {
//---------------------------------------------------------------------
// THREAD PARAMETERS
//---------------------------------------------------------------------
int bx = blockIdx.x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int wtx = tx;
//---------------------------------------------------------------------
// DO FOR THE NUMBER OF BOXES
//---------------------------------------------------------------------
if(bx<d_dim_gpu.number_boxes) {
//-------------------------------------------------------------
// Extract input parameters
//-------------------------------------------------------------
// parameters
double a2 = 2.0*d_par_gpu.alpha*d_par_gpu.alpha;
// home box
int first_i;
FOUR_VECTOR* rA;
FOUR_VECTOR* fA;
__shared__ FOUR_VECTOR rA_shared[200];
// nei box
int pointer;
int k = 0;
int first_j;
FOUR_VECTOR* rB;
double* qB;
int j = 0;
__shared__ FOUR_VECTOR rB_shared[200];
__shared__ double qB_shared[200];
// common
double r2;
double u2;
double vij;
double fs;
double fxij;
double fyij;
double fzij;
THREE_VECTOR d;
//-------------------------------------------------------------
// Home box
//-------------------------------------------------------------
//-------------------------------------------------------------
// Setup parameters
//-------------------------------------------------------------
// home box - box parameters
first_i = d_box_gpu[bx].offset;
// home box - distance, force, charge and type parameters
rA = &d_rv_gpu[first_i];
fA = &d_fv_gpu[first_i];
//-------------------------------------------------------------
// Copy to shared memory
//-------------------------------------------------------------
// home box - shared memory
while(wtx<NUMBER_PAR_PER_BOX) {
rA_shared[wtx] = rA[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// synchronize threads - not needed, but just to be safe
__syncthreads();
//-------------------------------------------------------------
// nei box loop
//-------------------------------------------------------------
// loop over neiing boxes of home box
for (k=0; k<(1+d_box_gpu[bx].nn); k++) {
//---------------------------------------------
// nei box - get pointer to the right box
//---------------------------------------------
if(k==0) {
pointer = bx; // set first box to be processed to home box
}
else {
// remaining boxes are nei boxes
pointer = d_box_gpu[bx].nei[k-1].number;
}
//-----------------------------------------------------
// Setup parameters
//-----------------------------------------------------
// nei box - box parameters
first_j = d_box_gpu[pointer].offset;
// nei box - distance, (force), charge and (type) parameters
rB = &d_rv_gpu[first_j];
qB = &d_qv_gpu[first_j];
//-----------------------------------------------------
// Setup parameters
//-----------------------------------------------------
// nei box - shared memory
while(wtx<NUMBER_PAR_PER_BOX) {
rB_shared[wtx] = rB[wtx];
qB_shared[wtx] = qB[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// synchronize threads because in next section each thread accesses data brought in by different threads here
__syncthreads();
//-----------------------------------------------------
// Calculation
//-----------------------------------------------------
// loop for the number of particles in the home box
// for (int i=0; i<nTotal_i; i++){
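// Pairwise interaction of every home-box particle (rA_shared) with every
// particle of the current neighbor box (rB_shared):
//   r2 = rA.v + rB.v - rA.rB, u2 = 2*alpha^2 * r2, vij = exp(-u2), fs = 2*vij
// The potential (v) and force (x,y,z) contributions are accumulated into fA,
// each weighted by the neighbor charge qB.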
while(wtx<NUMBER_PAR_PER_BOX) {
// loop for the number of particles in the current nei box
for (j=0; j<NUMBER_PAR_PER_BOX; j++) {
r2 = (double)rA_shared[wtx].v + (double)rB_shared[j].v - DOT((double)rA_shared[wtx],(double)rB_shared[j]);
u2 = a2*r2;
vij= exp(-u2);
fs = 2*vij;
d.x = (double)rA_shared[wtx].x - (double)rB_shared[j].x;
fxij=fs*d.x;
d.y = (double)rA_shared[wtx].y - (double)rB_shared[j].y;
fyij=fs*d.y;
d.z = (double)rA_shared[wtx].z - (double)rB_shared[j].z;
fzij=fs*d.z;
fA[wtx].v += (double)((double)qB_shared[j]*vij);
fA[wtx].x += (double)((double)qB_shared[j]*fxij);
fA[wtx].y += (double)((double)qB_shared[j]*fyij);
fA[wtx].z += (double)((double)qB_shared[j]*fzij);
}
// increment work thread index
wtx = wtx + NUMBER_THREADS;
}
// reset work index
wtx = tx;
// synchronize after finishing force contributions from current nei box not to cause conflicts when starting next box
__syncthreads();
//----------------------------------------------------------------------------------------------------------------------------------140
// Calculation END
//----------------------------------------------------------------------------------------------------------------------------------140
}
//------------------------------------------------------------------------------------------------------------------------------------------------------160
// nei box loop END
//------------------------------------------------------------------------------------------------------------------------------------------------------160
}
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
void generateInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, double **qv_cpu)
{
// random generator seed set to random value - time in this case
FILE *fp;
int i;
srand(time(NULL));
// input (distances)
if( (fp = fopen(input_distances, "wb" )) == 0 ) {
printf( "The file 'input_distances' was not opened\n" ); exit(EXIT_FAILURE);
}
*rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
for(i=0; i<dim_cpu.space_elem; i=i+1) {
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].v = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].v), 1, sizeof(double), fp);
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].x = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].x), 1, sizeof(double), fp);
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].y = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].y), 1, sizeof(double), fp);
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].z = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].z), 1, sizeof(double), fp);
}
fclose(fp);
// input (charge)
if( (fp = fopen(input_charges, "wb" )) == 0 ) {
printf( "The file 'input_charges' was not opened\n" ); exit(EXIT_FAILURE);
}
*qv_cpu = (double*)malloc(dim_cpu.space_mem2);
for(i=0; i<dim_cpu.space_elem; i=i+1) {
// get a number in the range 0.1 - 1.0
(*qv_cpu)[i] = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*qv_cpu)[i]), 1, sizeof(double), fp);
}
fclose(fp);
}
void readInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, double **qv_cpu, int fault_injection)
{
FILE *fp;
int i;
size_t return_value[4];
// input (distances)
if( (fp = fopen(input_distances, "rb" )) == 0 ) {
printf( "The file 'input_distances' was not opened\n" ); exit(EXIT_FAILURE);
}
*rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
if(*rv_cpu == NULL) {
printf("error rv_cpu malloc\n");
#ifdef LOGS
log_error_detail("error rv_cpu malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
return_value[0] = fread(&((*rv_cpu)[i].v), 1, sizeof(double), fp);
return_value[1] = fread(&((*rv_cpu)[i].x), 1, sizeof(double), fp);
return_value[2] = fread(&((*rv_cpu)[i].y), 1, sizeof(double), fp);
return_value[3] = fread(&((*rv_cpu)[i].z), 1, sizeof(double), fp);
if (return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) {
printf("error reading rv_cpu from file\n");
#ifdef LOGS
log_error_detail("error reading rv_cpu from file"); end_log_file();
#endif
exit(1);
}
}
fclose(fp);
// input (charge)
if( (fp = fopen(input_charges, "rb" )) == 0 ) {
printf( "The file 'input_charges' was not opened\n" ); exit(EXIT_FAILURE);
}
*qv_cpu = (double*)malloc(dim_cpu.space_mem2);
if(*qv_cpu == NULL) {
printf("error qv_cpu malloc\n");
#ifdef LOGS
log_error_detail("error qv_cpu malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
return_value[0] = fread(&((*qv_cpu)[i]), 1, sizeof(double), fp);
if (return_value[0] == 0) {
printf("error reading qv_cpu from file\n");
#ifdef LOGS
log_error_detail("error reading qv_cpu from file"); end_log_file();
#endif
exit(1);
}
}
fclose(fp);
// =============== Fault injection
if (fault_injection) {
(*qv_cpu)[2] = 0.732637263; // must be in range 0.1 - 1.0
printf("!!> Fault injection: qv_cpu[2]=%f\n", (*qv_cpu)[2]);
}
// ========================
}
void readGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu_GOLD)
{
FILE *fp;
size_t return_value[4];
int i;
if( (fp = fopen(output_gold, "rb" )) == 0 )
{
printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE);
}
*fv_cpu_GOLD = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
if(*fv_cpu_GOLD == NULL) {
printf("error fv_cpu_GOLD malloc\n");
#ifdef LOGS
log_error_detail("error fv_cpu_GOLD malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
return_value[0] = fread(&((*fv_cpu_GOLD)[i].v), 1, sizeof(double), fp);
return_value[1] = fread(&((*fv_cpu_GOLD)[i].x), 1, sizeof(double), fp);
return_value[2] = fread(&((*fv_cpu_GOLD)[i].y), 1, sizeof(double), fp);
return_value[3] = fread(&((*fv_cpu_GOLD)[i].z), 1, sizeof(double), fp);
if (return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) {
printf("error reading rv_cpu from file\n");
#ifdef LOGS
log_error_detail("error reading rv_cpu from file"); end_log_file();
#endif
exit(1);
}
}
fclose(fp);
}
void writeGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu)
{
FILE *fp;
int i;
if( (fp = fopen(output_gold, "wb" )) == 0 ) {
printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE);
}
int number_zeros = 0;
for(i=0; i<dim_cpu.space_elem; i=i+1) {
if((*fv_cpu)[i].v == 0.0)
number_zeros++;
if((*fv_cpu)[i].x == 0.0)
number_zeros++;
if((*fv_cpu)[i].y == 0.0)
number_zeros++;
if((*fv_cpu)[i].z == 0.0)
number_zeros++;
fwrite(&((*fv_cpu)[i].v), 1, sizeof(double), fp);
fwrite(&((*fv_cpu)[i].x), 1, sizeof(double), fp);
fwrite(&((*fv_cpu)[i].y), 1, sizeof(double), fp);
fwrite(&((*fv_cpu)[i].z), 1, sizeof(double), fp);
}
fclose(fp);
}
//=============================================================================
// MAIN FUNCTION
//=============================================================================
int main(int argc, char *argv []) {
//=====================================================================
// CPU/MCPU VARIABLES
//=====================================================================
// timer
double timestamp;
// counters
int i, j, k, l, m, n;
int iterations;
int generate, verbose, fault_injection;
// system memory
par_str par_cpu;
dim_str dim_cpu;
box_str* box_cpu;
FOUR_VECTOR* rv_cpu;
double* qv_cpu;
FOUR_VECTOR* fv_cpu;
FOUR_VECTOR* fv_cpu_GOLD;
int nh;
int nstreams, streamIdx;
hipError_t cuda_error;
const char *error_string;
char *input_distances, *input_charges, *output_gold;
int number_nn = 0;
//=====================================================================
// CHECK INPUT ARGUMENTS
//=====================================================================
getParams(argc, argv, &dim_cpu.boxes1d_arg, &generate, &input_distances, &input_charges, &output_gold, &iterations, &verbose, &fault_injection, &nstreams);
char test_info[200];
snprintf(test_info, 200, "type:double-precision streams:%d boxes:%d block_size:%d", nstreams, dim_cpu.boxes1d_arg, NUMBER_THREADS);
printf("%s\n", test_info);
#ifdef LOGS
if (!generate) start_log_file("cudaDLavaMD", test_info);
#endif
//=====================================================================
// INPUTS
//=====================================================================
par_cpu.alpha = 0.5;
//=====================================================================
// DIMENSIONS
//=====================================================================
// total number of boxes
dim_cpu.number_boxes = dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg;
// how many particles space has in each direction
dim_cpu.space_elem = dim_cpu.number_boxes * NUMBER_PAR_PER_BOX;
dim_cpu.space_mem = dim_cpu.space_elem * sizeof(FOUR_VECTOR);
dim_cpu.space_mem2 = dim_cpu.space_elem * sizeof(double);
// box array
dim_cpu.box_mem = dim_cpu.number_boxes * sizeof(box_str);
//=====================================================================
// SYSTEM MEMORY
//=====================================================================
//=====================================================================
// BOX
//=====================================================================
// allocate boxes
box_cpu = (box_str*)malloc(dim_cpu.box_mem);
if(box_cpu == NULL) {
printf("error box_cpu malloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error box_cpu malloc"); end_log_file();
#endif
exit(1);
}
// initialize number of home boxes
nh = 0;
// home boxes in z direction
for(i=0; i<dim_cpu.boxes1d_arg; i++) {
// home boxes in y direction
for(j=0; j<dim_cpu.boxes1d_arg; j++) {
// home boxes in x direction
for(k=0; k<dim_cpu.boxes1d_arg; k++) {
// current home box
box_cpu[nh].x = k;
box_cpu[nh].y = j;
box_cpu[nh].z = i;
box_cpu[nh].number = nh;
box_cpu[nh].offset = nh * NUMBER_PAR_PER_BOX;
// initialize number of neighbor boxes
box_cpu[nh].nn = 0;
// neighbor boxes in z direction
for(l=-1; l<2; l++) {
// neighbor boxes in y direction
for(m=-1; m<2; m++) {
// neighbor boxes in x direction
for(n=-1; n<2; n++) {
// check if (this neighbor exists) and (it is not the same as home box)
if( (((i+l)>=0 && (j+m)>=0 && (k+n)>=0)==true && ((i+l)<dim_cpu.boxes1d_arg && (j+m)<dim_cpu.boxes1d_arg && (k+n)<dim_cpu.boxes1d_arg)==true) &&
(l==0 && m==0 && n==0)==false ) {
// current neighbor box
box_cpu[nh].nei[box_cpu[nh].nn].x = (k+n);
box_cpu[nh].nei[box_cpu[nh].nn].y = (j+m);
box_cpu[nh].nei[box_cpu[nh].nn].z = (i+l);
box_cpu[nh].nei[box_cpu[nh].nn].number = (box_cpu[nh].nei[box_cpu[nh].nn].z * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg) +
(box_cpu[nh].nei[box_cpu[nh].nn].y * dim_cpu.boxes1d_arg) + box_cpu[nh].nei[box_cpu[nh].nn].x;
box_cpu[nh].nei[box_cpu[nh].nn].offset = box_cpu[nh].nei[box_cpu[nh].nn].number * NUMBER_PAR_PER_BOX;
// increment neighbor box
box_cpu[nh].nn = box_cpu[nh].nn + 1;
number_nn += box_cpu[nh].nn;
}
} // neighbor boxes in x direction
} // neighbor boxes in y direction
} // neighbor boxes in z direction
// increment home box
nh = nh + 1;
} // home boxes in x direction
} // home boxes in y direction
} // home boxes in z direction
//=====================================================================
// PARAMETERS, DISTANCE, CHARGE AND FORCE
//=====================================================================
if (generate) {
generateInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu);
} else {
readInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu, fault_injection);
readGold(dim_cpu, output_gold, &fv_cpu_GOLD);
}
//=====================================================================
// EXECUTION PARAMETERS
//=====================================================================
dim3 threads;
dim3 blocks;
blocks.x = dim_cpu.number_boxes;
blocks.y = 1;
// define the number of threads in the block
threads.x = NUMBER_THREADS;
threads.y = 1;
hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
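// One stream per requested copy of the workload: each stream gets its own set of
// device buffers and its own kernel launch below, so the nstreams instances can
// proceed independently on the GPU.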
//LOOP START
int loop;
for(loop=0; loop<iterations; loop++) {
if (verbose) {
printf("[Iteration #%i]=====================================\n", loop); fflush(stdout);
}
double globaltimer = mysecond();
timestamp = mysecond();
// prepare host memory to receive kernel output
// output (forces)
fv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
if(fv_cpu == NULL) {
printf("error fv_cpu malloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error fv_cpu malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
// set to 0, because kernels keeps adding to initial value
fv_cpu[i].v = 0;
fv_cpu[i].x = 0;
fv_cpu[i].y = 0;
fv_cpu[i].z = 0;
}
//=====================================================================
// GPU_CUDA
//=====================================================================
//=====================================================================
// VARIABLES
//=====================================================================
box_str* d_box_gpu[nstreams];
FOUR_VECTOR* d_rv_gpu[nstreams];
double* d_qv_gpu[nstreams];
FOUR_VECTOR* d_fv_gpu[nstreams];
//=====================================================================
// GPU SETUP
//=====================================================================
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
hipStreamCreateWithFlags(&(streams[streamIdx]), hipStreamNonBlocking);
//==================================================
// boxes
//==================================================
#ifdef SAFE_MALLOC
cuda_error = hipSuccess;
safe_cuda_malloc_cover((void **)&(d_box_gpu[streamIdx]), dim_cpu.box_mem);
#else
cuda_error = hipMalloc( (void **)&(d_box_gpu[streamIdx]), dim_cpu.box_mem);
#endif
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_box_gpu hipMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//==================================================
// rv
//==================================================
#ifdef SAFE_MALLOC
cuda_error = hipSuccess;
safe_cuda_malloc_cover( (void **)&(d_rv_gpu[streamIdx]), dim_cpu.space_mem);
#else
cuda_error = hipMalloc( (void **)&(d_rv_gpu[streamIdx]), dim_cpu.space_mem);
#endif
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_rv_gpu hipMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//==================================================
// qv
//==================================================
#ifdef SAFE_MALLOC
cuda_error = hipSuccess;
safe_cuda_malloc_cover( (void **)&(d_qv_gpu[streamIdx]), dim_cpu.space_mem2); // use the SAFE_MALLOC wrapper, matching the other buffers
#else
cuda_error = hipMalloc( (void **)&(d_qv_gpu[streamIdx]), dim_cpu.space_mem2);
#endif
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_qv_gpu hipMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//==================================================
// fv
//==================================================
#ifdef SAFE_MALLOC
cuda_error = hipSuccess;
safe_cuda_malloc_cover( (void **)&(d_fv_gpu[streamIdx]), dim_cpu.space_mem);
#else
cuda_error = hipMalloc( (void **)&(d_fv_gpu[streamIdx]), dim_cpu.space_mem);
#endif
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_fv_gpu hipMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//=====================================================================
// GPU MEMORY COPY
//=====================================================================
//==================================================
// boxes
//==================================================
cuda_error = hipMemcpy(d_box_gpu[streamIdx], box_cpu, dim_cpu.box_mem, hipMemcpyHostToDevice);
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_boc_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_box_gpu"); end_log_file();
#endif
exit(1);
}
//==================================================
// rv
//==================================================
cuda_error = hipMemcpy( d_rv_gpu[streamIdx], rv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice);
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_rv_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_box_gpu"); end_log_file();
#endif
exit(1);
}
//==================================================
// qv
//==================================================
cuda_error = hipMemcpy( d_qv_gpu[streamIdx], qv_cpu, dim_cpu.space_mem2, hipMemcpyHostToDevice);
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_qv_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_box_gpu"); end_log_file();
#endif
exit(1);
}
//==================================================
// fv
//==================================================
cuda_error = hipMemcpy( d_fv_gpu[streamIdx], fv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice);
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_fv_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_box_gpu"); end_log_file();
#endif
exit(1);
}
}
if (verbose) printf("[Iteration #%i] Setup prepare time: %.4fs\n", loop, mysecond()-timestamp);
//=====================================================================
// KERNEL
//=====================================================================
double kernel_time=mysecond();
#ifdef LOGS
if (!generate) start_iteration();
#endif
// launch kernel - all boxes
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
hipLaunchKernelGGL(( kernel_gpu_cuda), dim3(blocks), dim3(threads), 0, streams[streamIdx], par_cpu, dim_cpu, \
d_box_gpu[streamIdx], d_rv_gpu[streamIdx], d_qv_gpu[streamIdx], d_fv_gpu[streamIdx]);
checkCudaErrors( hipPeekAtLastError() );
}
//printf("All kernels were commited.\n");
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
cuda_error = hipStreamSynchronize(streams[streamIdx]);
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error logic: %s\n",error_string);
#ifdef LOGS
if (!generate) log_error_detail("error logic:"); end_log_file();
#endif
exit(1);
}
checkCudaErrors( hipPeekAtLastError() );
}
#ifdef LOGS
if (!generate) end_iteration();
#endif
kernel_time = mysecond()-kernel_time;
//=====================================================================
// COMPARE OUTPUTS / WRITE GOLD
//=====================================================================
if (generate){
cuda_error = hipMemcpy( fv_cpu, d_fv_gpu[0], dim_cpu.space_mem, hipMemcpyDeviceToHost);
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error download fv_cpu\n");
exit(1);
}
writeGold(dim_cpu, output_gold, &fv_cpu);
} else { // Check gold
//int ea = 0;
int thread_error = 0;
int kernel_errors = 0;
char error_detail[300];
timestamp = mysecond();
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
//=====================================================================
// GPU MEMORY COPY BACK
//=====================================================================
cuda_error = hipMemcpy( fv_cpu, d_fv_gpu[streamIdx], dim_cpu.space_mem, hipMemcpyDeviceToHost);
error_string = hipGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error download fv_cpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error download fv_cpu"); end_log_file();
#endif
exit(1);
}
#pragma omp parallel for
for(i=0; i<dim_cpu.space_elem; i=i+1) {
if(fv_cpu_GOLD[i].v != fv_cpu[i].v) {
thread_error++;
}
if(fv_cpu_GOLD[i].x != fv_cpu[i].x) {
thread_error++;
}
if(fv_cpu_GOLD[i].y != fv_cpu[i].y) {
thread_error++;
}
if(fv_cpu_GOLD[i].z != fv_cpu[i].z) {
thread_error++;
}
if (thread_error > 0) {
#pragma omp critical
{
kernel_errors++;
snprintf(error_detail, 300, "stream: %d, p: [%d], ea: %d, v_r: %1.16e, v_e: %1.16e, x_r: %1.16e, x_e: %1.16e, y_r: %1.16e, y_e: %1.16e, z_r: %1.16e, z_e: %1.16e\n", streamIdx, \
i, thread_error, fv_cpu[i].v, fv_cpu_GOLD[i].v, fv_cpu[i].x, fv_cpu_GOLD[i].x, fv_cpu[i].y, fv_cpu_GOLD[i].y, fv_cpu[i].z, fv_cpu_GOLD[i].z);
if (kernel_errors<25) printf("ERROR: %s\n", error_detail);
if (kernel_errors>=25) printf("!");
#ifdef LOGS
if (!generate) log_error_detail(error_detail);
#endif
thread_error = 0;
}
}
}
}
#ifdef LOGS
if (!generate) log_error_count(kernel_errors);
#endif
if (verbose) printf("[Iteration #%i] Gold check time: %f\n", loop, mysecond() - timestamp);
}
//================= PERF
// iterate for each neighbor of a box (number_nn)
double flop = number_nn;
// The last for iterate NUMBER_PAR_PER_BOX times
flop *= NUMBER_PAR_PER_BOX;
// the last for uses 46 operations plus 2 exp() functions
flop *=46;
flop *= nstreams;
double flops = (double)flop/kernel_time;
double outputpersec = (double)dim_cpu.space_elem * 4 * nstreams / kernel_time;
if (verbose) printf("[Iteration #%i] BOXES:%d BLOCK:%d OUTPUT/S:%.2f FLOPS:%.2f (GFLOPS:%.2f)\n", loop, dim_cpu.boxes1d_arg, NUMBER_THREADS, outputpersec, flops, flops/1000000000);
if (verbose) printf("[Iteration #%i] kernel_time:%f\n", loop, kernel_time);
//=====================
printf(".");
fflush(stdout);
//=====================================================================
// GPU MEMORY DEALLOCATION
//=====================================================================
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
hipFree(d_rv_gpu[streamIdx]);
hipFree(d_qv_gpu[streamIdx]);
hipFree(d_fv_gpu[streamIdx]);
hipFree(d_box_gpu[streamIdx]);
}
//=====================================================================
// SYSTEM MEMORY DEALLOCATION
//=====================================================================
free(fv_cpu);
if (verbose) printf("[Iteration #%i] Elapsed time: %.4fs\n", loop, mysecond()-globaltimer);
}
if (!generate) free(fv_cpu_GOLD);
free(rv_cpu);
free(qv_cpu);
free(box_cpu);
printf("\n");
#ifdef LOGS
if (!generate) end_log_file();
#endif
return 0;
}
| 9c1e08e7fa5c4dbcb0d870e10db32fdb6f779909.cu | //============================================================================
// UPDATE
//============================================================================
// 14 APR 2011 Lukasz G. Szafaryn
// 2014-2018 Caio Lunardi
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdbool.h> // (in path known to compiler) needed by true/false
#include <omp.h>
// Helper functions
#include "helper_cuda.h"
#include "helper_string.h"
#ifdef LOGS
#include "log_helper.h"
#endif
#ifdef SAFE_MALLOC
#include "safe_memory/safe_memory.h"
#endif
//=============================================================================
// DEFINE / INCLUDE
//=============================================================================
#define NUMBER_PAR_PER_BOX 192 // keep this low to allow more blocks that share shared memory to run concurrently, code does not work for larger than 110, more speedup can be achieved with larger number and no shared memory used
#define NUMBER_THREADS 192 // this should be roughly equal to NUMBER_PAR_PER_BOX for best performance
// STABLE
#define DOT(A,B) ((A.x)*(B.x)+(A.y)*(B.y)+(A.z)*(B.z))
//=============================================================================
// STRUCTURES
//=============================================================================
typedef struct
{
double x, y, z;
} THREE_VECTOR;
typedef struct
{
double v, x, y, z;
} FOUR_VECTOR;
typedef struct nei_str
{
// neighbor box
int x, y, z;
int number;
long offset;
} nei_str;
typedef struct box_str
{
// home box
int x, y, z;
int number;
long offset;
// neighbor boxes
int nn;
nei_str nei[26];
} box_str;
typedef struct par_str
{
double alpha;
} par_str;
typedef struct dim_str
{
// input arguments
int cur_arg;
int arch_arg;
int cores_arg;
int boxes1d_arg;
// system memory
long number_boxes;
long box_mem;
long space_elem;
long space_mem;
long space_mem2;
} dim_str;
void usage(int argc, char** argv) {
printf("Usage: %s -boxes=N [-generate] [-input_distances=<path>] [-input_charges=<path>] [-output_gold=<path>] [-iterations=N] [-streams=N] [-debug] [-verbose]\n", argv[0]);
}
void getParams(int argc, char** argv, int *boxes, int *generate, char **input_distances, char **input_charges, char **output_gold, int *iterations, int *verbose, int *fault_injection, int *nstreams)
{
if (argc<2) {
usage(argc, argv);
exit(EXIT_FAILURE);
}
*generate = 0;
*iterations = 1000000;
*nstreams = 1;
*fault_injection = 0;
*verbose = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "boxes"))
{
*boxes = getCmdLineArgumentInt(argc, (const char **)argv, "boxes");
if (*boxes <= 0)
{
printf("Invalid input size given on the command-line: %d\n", *boxes);
exit(EXIT_FAILURE);
}
}
else
{
usage(argc, argv);
exit(EXIT_FAILURE);
}
if (checkCmdLineFlag(argc, (const char **)argv, "generate"))
{
*generate = 1;
printf(">> Output will be written to file. Only stream #0 output will be considered.\n");
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_distances"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_distances", input_distances);
}
else
{
*input_distances = new char[100];
snprintf(*input_distances, 100, "dlava_distances_%i", *boxes);
printf("Using default input_distances path: %s\n", *input_distances);
}
if (checkCmdLineFlag(argc, (const char **)argv, "input_charges"))
{
getCmdLineArgumentString(argc, (const char **)argv, "input_charges", input_charges);
}
else
{
*input_charges = new char[100];
snprintf(*input_charges, 100, "dlava_charges_%i", *boxes);
printf("Using default input_charges path: %s\n", *input_charges);
}
if (checkCmdLineFlag(argc, (const char **)argv, "output_gold"))
{
getCmdLineArgumentString(argc, (const char **)argv, "output_gold", output_gold);
}
else
{
*output_gold = new char[100];
snprintf(*output_gold, 100, "dlava_gold_%i", *boxes);
printf("Using default output_gold path: %s\n", *output_gold);
}
if (checkCmdLineFlag(argc, (const char **)argv, "iterations"))
{
*iterations = getCmdLineArgumentInt(argc, (const char **)argv, "iterations");
}
if (checkCmdLineFlag(argc, (const char **)argv, "streams"))
{
*nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "streams");
}
if (checkCmdLineFlag(argc, (const char **)argv, "verbose"))
{
*verbose = 1;
}
if (checkCmdLineFlag(argc, (const char **)argv, "debug"))
{
*fault_injection = 1;
printf("!! Will be injected an input error\n");
}
}
//-----------------------------------------------------------------------------
// plasmaKernel_gpu_2
//-----------------------------------------------------------------------------
__global__ void kernel_gpu_cuda(par_str d_par_gpu, dim_str d_dim_gpu, box_str* d_box_gpu, FOUR_VECTOR* d_rv_gpu, double* d_qv_gpu, FOUR_VECTOR* d_fv_gpu) {
//---------------------------------------------------------------------
// THREAD PARAMETERS
//---------------------------------------------------------------------
int bx = blockIdx.x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int wtx = tx;
//---------------------------------------------------------------------
// DO FOR THE NUMBER OF BOXES
//---------------------------------------------------------------------
if(bx<d_dim_gpu.number_boxes) {
//-------------------------------------------------------------
// Extract input parameters
//-------------------------------------------------------------
// parameters
double a2 = 2.0*d_par_gpu.alpha*d_par_gpu.alpha;
// home box
int first_i;
FOUR_VECTOR* rA;
FOUR_VECTOR* fA;
__shared__ FOUR_VECTOR rA_shared[200];
// nei box
int pointer;
int k = 0;
int first_j;
FOUR_VECTOR* rB;
double* qB;
int j = 0;
__shared__ FOUR_VECTOR rB_shared[200];
__shared__ double qB_shared[200];
// common
double r2;
double u2;
double vij;
double fs;
double fxij;
double fyij;
double fzij;
THREE_VECTOR d;
//-------------------------------------------------------------
// Home box
//-------------------------------------------------------------
//-------------------------------------------------------------
// Setup parameters
//-------------------------------------------------------------
// home box - box parameters
first_i = d_box_gpu[bx].offset;
// home box - distance, force, charge and type parameters
rA = &d_rv_gpu[first_i];
fA = &d_fv_gpu[first_i];
//-------------------------------------------------------------
// Copy to shared memory
//-------------------------------------------------------------
// home box - shared memory
while(wtx<NUMBER_PAR_PER_BOX) {
rA_shared[wtx] = rA[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// synchronize threads - not needed, but just to be safe
__syncthreads();
//-------------------------------------------------------------
// nei box loop
//-------------------------------------------------------------
// loop over neiing boxes of home box
for (k=0; k<(1+d_box_gpu[bx].nn); k++) {
//---------------------------------------------
// nei box - get pointer to the right box
//---------------------------------------------
if(k==0) {
pointer = bx; // set first box to be processed to home box
}
else {
// remaining boxes are nei boxes
pointer = d_box_gpu[bx].nei[k-1].number;
}
//-----------------------------------------------------
// Setup parameters
//-----------------------------------------------------
// nei box - box parameters
first_j = d_box_gpu[pointer].offset;
// nei box - distance, (force), charge and (type) parameters
rB = &d_rv_gpu[first_j];
qB = &d_qv_gpu[first_j];
//-----------------------------------------------------
// Setup parameters
//-----------------------------------------------------
// nei box - shared memory
while(wtx<NUMBER_PAR_PER_BOX) {
rB_shared[wtx] = rB[wtx];
qB_shared[wtx] = qB[wtx];
wtx = wtx + NUMBER_THREADS;
}
wtx = tx;
// synchronize threads because in next section each thread accesses data brought in by different threads here
__syncthreads();
//-----------------------------------------------------
// Calculation
//-----------------------------------------------------
// loop for the number of particles in the home box
// for (int i=0; i<nTotal_i; i++){
while(wtx<NUMBER_PAR_PER_BOX) {
// loop for the number of particles in the current nei box
for (j=0; j<NUMBER_PAR_PER_BOX; j++) {
r2 = (double)rA_shared[wtx].v + (double)rB_shared[j].v - DOT((double)rA_shared[wtx],(double)rB_shared[j]);
u2 = a2*r2;
vij= exp(-u2);
fs = 2*vij;
d.x = (double)rA_shared[wtx].x - (double)rB_shared[j].x;
fxij=fs*d.x;
d.y = (double)rA_shared[wtx].y - (double)rB_shared[j].y;
fyij=fs*d.y;
d.z = (double)rA_shared[wtx].z - (double)rB_shared[j].z;
fzij=fs*d.z;
fA[wtx].v += (double)((double)qB_shared[j]*vij);
fA[wtx].x += (double)((double)qB_shared[j]*fxij);
fA[wtx].y += (double)((double)qB_shared[j]*fyij);
fA[wtx].z += (double)((double)qB_shared[j]*fzij);
}
// increment work thread index
wtx = wtx + NUMBER_THREADS;
}
// reset work index
wtx = tx;
// synchronize after finishing force contributions from current nei box not to cause conflicts when starting next box
__syncthreads();
//----------------------------------------------------------------------------------------------------------------------------------140
// Calculation END
//----------------------------------------------------------------------------------------------------------------------------------140
}
//------------------------------------------------------------------------------------------------------------------------------------------------------160
// nei box loop END
//------------------------------------------------------------------------------------------------------------------------------------------------------160
}
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
void generateInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, double **qv_cpu)
{
// random generator seed set to random value - time in this case
FILE *fp;
int i;
srand(time(NULL));
// input (distances)
if( (fp = fopen(input_distances, "wb" )) == 0 ) {
printf( "The file 'input_distances' was not opened\n" ); exit(EXIT_FAILURE);
}
*rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
for(i=0; i<dim_cpu.space_elem; i=i+1) {
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].v = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].v), 1, sizeof(double), fp);
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].x = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].x), 1, sizeof(double), fp);
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].y = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].y), 1, sizeof(double), fp);
// get a number in the range 0.1 - 1.0
(*rv_cpu)[i].z = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*rv_cpu)[i].z), 1, sizeof(double), fp);
}
fclose(fp);
// input (charge)
if( (fp = fopen(input_charges, "wb" )) == 0 ) {
printf( "The file 'input_charges' was not opened\n" ); exit(EXIT_FAILURE);
}
*qv_cpu = (double*)malloc(dim_cpu.space_mem2);
for(i=0; i<dim_cpu.space_elem; i=i+1) {
// get a number in the range 0.1 - 1.0
(*qv_cpu)[i] = (double)(rand()%10 + 1) / 10.0;
fwrite(&((*qv_cpu)[i]), 1, sizeof(double), fp);
}
fclose(fp);
}
void readInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, double **qv_cpu, int fault_injection)
{
FILE *fp;
int i;
size_t return_value[4];
// input (distances)
if( (fp = fopen(input_distances, "rb" )) == 0 ) {
printf( "The file 'input_distances' was not opened\n" ); exit(EXIT_FAILURE);
}
*rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
if(*rv_cpu == NULL) {
printf("error rv_cpu malloc\n");
#ifdef LOGS
log_error_detail("error rv_cpu malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
return_value[0] = fread(&((*rv_cpu)[i].v), 1, sizeof(double), fp);
return_value[1] = fread(&((*rv_cpu)[i].x), 1, sizeof(double), fp);
return_value[2] = fread(&((*rv_cpu)[i].y), 1, sizeof(double), fp);
return_value[3] = fread(&((*rv_cpu)[i].z), 1, sizeof(double), fp);
if (return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) {
printf("error reading rv_cpu from file\n");
#ifdef LOGS
log_error_detail("error reading rv_cpu from file"); end_log_file();
#endif
exit(1);
}
}
fclose(fp);
// input (charge)
if( (fp = fopen(input_charges, "rb" )) == 0 ) {
printf( "The file 'input_charges' was not opened\n" ); exit(EXIT_FAILURE);
}
*qv_cpu = (double*)malloc(dim_cpu.space_mem2);
if(*qv_cpu == NULL) {
printf("error qv_cpu malloc\n");
#ifdef LOGS
log_error_detail("error qv_cpu malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
return_value[0] = fread(&((*qv_cpu)[i]), 1, sizeof(double), fp);
if (return_value[0] == 0) {
printf("error reading qv_cpu from file\n");
#ifdef LOGS
log_error_detail("error reading qv_cpu from file"); end_log_file();
#endif
exit(1);
}
}
fclose(fp);
// =============== Fault injection
if (fault_injection) {
(*qv_cpu)[2] = 0.732637263; // must be in range 0.1 - 1.0
printf("!!> Fault injection: qv_cpu[2]=%f\n", (*qv_cpu)[2]);
}
// ========================
}
void readGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu_GOLD)
{
FILE *fp;
size_t return_value[4];
int i;
if( (fp = fopen(output_gold, "rb" )) == 0 )
{
printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE);
}
*fv_cpu_GOLD = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
if(*fv_cpu_GOLD == NULL) {
printf("error fv_cpu_GOLD malloc\n");
#ifdef LOGS
log_error_detail("error fv_cpu_GOLD malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
return_value[0] = fread(&((*fv_cpu_GOLD)[i].v), 1, sizeof(double), fp);
return_value[1] = fread(&((*fv_cpu_GOLD)[i].x), 1, sizeof(double), fp);
return_value[2] = fread(&((*fv_cpu_GOLD)[i].y), 1, sizeof(double), fp);
return_value[3] = fread(&((*fv_cpu_GOLD)[i].z), 1, sizeof(double), fp);
if (return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) {
printf("error reading rv_cpu from file\n");
#ifdef LOGS
log_error_detail("error reading rv_cpu from file"); end_log_file();
#endif
exit(1);
}
}
fclose(fp);
}
void writeGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu)
{
FILE *fp;
int i;
if( (fp = fopen(output_gold, "wb" )) == 0 ) {
printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE);
}
int number_zeros = 0;
for(i=0; i<dim_cpu.space_elem; i=i+1) {
if((*fv_cpu)[i].v == 0.0)
number_zeros++;
if((*fv_cpu)[i].x == 0.0)
number_zeros++;
if((*fv_cpu)[i].y == 0.0)
number_zeros++;
if((*fv_cpu)[i].z == 0.0)
number_zeros++;
fwrite(&((*fv_cpu)[i].v), 1, sizeof(double), fp);
fwrite(&((*fv_cpu)[i].x), 1, sizeof(double), fp);
fwrite(&((*fv_cpu)[i].y), 1, sizeof(double), fp);
fwrite(&((*fv_cpu)[i].z), 1, sizeof(double), fp);
}
fclose(fp);
}
//=============================================================================
// MAIN FUNCTION
//=============================================================================
int main(int argc, char *argv []) {
//=====================================================================
// CPU/MCPU VARIABLES
//=====================================================================
// timer
double timestamp;
// counters
int i, j, k, l, m, n;
int iterations;
int generate, verbose, fault_injection;
// system memory
par_str par_cpu;
dim_str dim_cpu;
box_str* box_cpu;
FOUR_VECTOR* rv_cpu;
double* qv_cpu;
FOUR_VECTOR* fv_cpu;
FOUR_VECTOR* fv_cpu_GOLD;
int nh;
int nstreams, streamIdx;
cudaError_t cuda_error;
const char *error_string;
char *input_distances, *input_charges, *output_gold;
int number_nn = 0;
//=====================================================================
// CHECK INPUT ARGUMENTS
//=====================================================================
getParams(argc, argv, &dim_cpu.boxes1d_arg, &generate, &input_distances, &input_charges, &output_gold, &iterations, &verbose, &fault_injection, &nstreams);
char test_info[200];
snprintf(test_info, 200, "type:double-precision streams:%d boxes:%d block_size:%d", nstreams, dim_cpu.boxes1d_arg, NUMBER_THREADS);
printf("%s\n", test_info);
#ifdef LOGS
if (!generate) start_log_file("cudaDLavaMD", test_info);
#endif
//=====================================================================
// INPUTS
//=====================================================================
par_cpu.alpha = 0.5;
//=====================================================================
// DIMENSIONS
//=====================================================================
// total number of boxes
dim_cpu.number_boxes = dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg;
// how many particles space has in each direction
dim_cpu.space_elem = dim_cpu.number_boxes * NUMBER_PAR_PER_BOX;
dim_cpu.space_mem = dim_cpu.space_elem * sizeof(FOUR_VECTOR);
dim_cpu.space_mem2 = dim_cpu.space_elem * sizeof(double);
// box array
dim_cpu.box_mem = dim_cpu.number_boxes * sizeof(box_str);
//=====================================================================
// SYSTEM MEMORY
//=====================================================================
//=====================================================================
// BOX
//=====================================================================
// allocate boxes
box_cpu = (box_str*)malloc(dim_cpu.box_mem);
if(box_cpu == NULL) {
printf("error box_cpu malloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error box_cpu malloc"); end_log_file();
#endif
exit(1);
}
// initialize number of home boxes
nh = 0;
// home boxes in z direction
for(i=0; i<dim_cpu.boxes1d_arg; i++) {
// home boxes in y direction
for(j=0; j<dim_cpu.boxes1d_arg; j++) {
// home boxes in x direction
for(k=0; k<dim_cpu.boxes1d_arg; k++) {
// current home box
box_cpu[nh].x = k;
box_cpu[nh].y = j;
box_cpu[nh].z = i;
box_cpu[nh].number = nh;
box_cpu[nh].offset = nh * NUMBER_PAR_PER_BOX;
// initialize number of neighbor boxes
box_cpu[nh].nn = 0;
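// Enumerate the up to 26 neighbor boxes (3*3*3 - 1 offsets), skipping the home
// box itself and any offset that falls outside the boxes1d_arg^3 grid.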
// neighbor boxes in z direction
for(l=-1; l<2; l++) {
// neighbor boxes in y direction
for(m=-1; m<2; m++) {
// neighbor boxes in x direction
for(n=-1; n<2; n++) {
// check if (this neighbor exists) and (it is not the same as home box)
if( (((i+l)>=0 && (j+m)>=0 && (k+n)>=0)==true && ((i+l)<dim_cpu.boxes1d_arg && (j+m)<dim_cpu.boxes1d_arg && (k+n)<dim_cpu.boxes1d_arg)==true) &&
(l==0 && m==0 && n==0)==false ) {
// current neighbor box
box_cpu[nh].nei[box_cpu[nh].nn].x = (k+n);
box_cpu[nh].nei[box_cpu[nh].nn].y = (j+m);
box_cpu[nh].nei[box_cpu[nh].nn].z = (i+l);
box_cpu[nh].nei[box_cpu[nh].nn].number = (box_cpu[nh].nei[box_cpu[nh].nn].z * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg) +
(box_cpu[nh].nei[box_cpu[nh].nn].y * dim_cpu.boxes1d_arg) + box_cpu[nh].nei[box_cpu[nh].nn].x;
box_cpu[nh].nei[box_cpu[nh].nn].offset = box_cpu[nh].nei[box_cpu[nh].nn].number * NUMBER_PAR_PER_BOX;
// increment neighbor box
box_cpu[nh].nn = box_cpu[nh].nn + 1;
number_nn += box_cpu[nh].nn;
}
} // neighbor boxes in x direction
} // neighbor boxes in y direction
} // neighbor boxes in z direction
// increment home box
nh = nh + 1;
} // home boxes in x direction
} // home boxes in y direction
} // home boxes in z direction
//=====================================================================
// PARAMETERS, DISTANCE, CHARGE AND FORCE
//=====================================================================
if (generate) {
generateInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu);
} else {
readInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu, fault_injection);
readGold(dim_cpu, output_gold, &fv_cpu_GOLD);
}
//=====================================================================
// EXECUTION PARAMETERS
//=====================================================================
dim3 threads;
dim3 blocks;
blocks.x = dim_cpu.number_boxes;
blocks.y = 1;
// define the number of threads in the block
threads.x = NUMBER_THREADS;
threads.y = 1;
cudaStream_t *streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
//LOOP START
int loop;
for(loop=0; loop<iterations; loop++) {
if (verbose) {
printf("[Iteration #%i]=====================================\n", loop); fflush(stdout);
}
double globaltimer = mysecond();
timestamp = mysecond();
// prepare host memory to receive kernel output
// output (forces)
fv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);
if(fv_cpu == NULL) {
printf("error fv_cpu malloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error fv_cpu malloc"); end_log_file();
#endif
exit(1);
}
for(i=0; i<dim_cpu.space_elem; i=i+1) {
// set to 0, because kernels keeps adding to initial value
fv_cpu[i].v = 0;
fv_cpu[i].x = 0;
fv_cpu[i].y = 0;
fv_cpu[i].z = 0;
}
//=====================================================================
// GPU_CUDA
//=====================================================================
//=====================================================================
// VARIABLES
//=====================================================================
box_str* d_box_gpu[nstreams];
FOUR_VECTOR* d_rv_gpu[nstreams];
double* d_qv_gpu[nstreams];
FOUR_VECTOR* d_fv_gpu[nstreams];
//=====================================================================
// GPU SETUP
//=====================================================================
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
cudaStreamCreateWithFlags(&(streams[streamIdx]), cudaStreamNonBlocking);
//==================================================
// boxes
//==================================================
#ifdef SAFE_MALLOC
cuda_error = cudaSuccess;
safe_cuda_malloc_cover((void **)&(d_box_gpu[streamIdx]), dim_cpu.box_mem);
#else
cuda_error = cudaMalloc( (void **)&(d_box_gpu[streamIdx]), dim_cpu.box_mem);
#endif
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_box_gpu cudaMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//==================================================
// rv
//==================================================
#ifdef SAFE_MALLOC
cuda_error = cudaSuccess;
safe_cuda_malloc_cover( (void **)&(d_rv_gpu[streamIdx]), dim_cpu.space_mem);
#else
cuda_error = cudaMalloc( (void **)&(d_rv_gpu[streamIdx]), dim_cpu.space_mem);
#endif
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_rv_gpu cudaMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//==================================================
// qv
//==================================================
#ifdef SAFE_MALLOC
cuda_error = cudaSuccess;
safe_cuda_malloc_cover( (void **)&(d_qv_gpu[streamIdx]), dim_cpu.space_mem2); // use the SAFE_MALLOC wrapper, matching the other buffers
#else
cuda_error = cudaMalloc( (void **)&(d_qv_gpu[streamIdx]), dim_cpu.space_mem2);
#endif
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_qv_gpu cudaMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//==================================================
// fv
//==================================================
#ifdef SAFE_MALLOC
cuda_error = cudaSuccess;
safe_cuda_malloc_cover( (void **)&(d_fv_gpu[streamIdx]), dim_cpu.space_mem);
#else
cuda_error = cudaMalloc( (void **)&(d_fv_gpu[streamIdx]), dim_cpu.space_mem);
#endif
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error d_fv_gpu cudaMalloc\n");
#ifdef LOGS
if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file();
#endif
exit(1);
}
//=====================================================================
// GPU MEMORY COPY
//=====================================================================
//==================================================
// boxes
//==================================================
cuda_error = cudaMemcpy(d_box_gpu[streamIdx], box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice);
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_boc_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_box_gpu"); end_log_file();
#endif
exit(1);
}
//==================================================
// rv
//==================================================
cuda_error = cudaMemcpy( d_rv_gpu[streamIdx], rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_rv_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_rv_gpu"); end_log_file();
#endif
exit(1);
}
//==================================================
// qv
//==================================================
cuda_error = cudaMemcpy( d_qv_gpu[streamIdx], qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice);
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_qv_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_qv_gpu"); end_log_file();
#endif
exit(1);
}
//==================================================
// fv
//==================================================
cuda_error = cudaMemcpy( d_fv_gpu[streamIdx], fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error load d_fv_gpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error load d_fv_gpu"); end_log_file();
#endif
exit(1);
}
}
if (verbose) printf("[Iteration #%i] Setup prepare time: %.4fs\n", loop, mysecond()-timestamp);
//=====================================================================
// KERNEL
//=====================================================================
double kernel_time=mysecond();
#ifdef LOGS
if (!generate) start_iteration();
#endif
// launch kernel - all boxes
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
kernel_gpu_cuda<<<blocks, threads, 0, streams[streamIdx]>>>( par_cpu, dim_cpu, \
d_box_gpu[streamIdx], d_rv_gpu[streamIdx], d_qv_gpu[streamIdx], d_fv_gpu[streamIdx]);
checkCudaErrors( cudaPeekAtLastError() );
}
//printf("All kernels were commited.\n");
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
cuda_error = cudaStreamSynchronize(streams[streamIdx]);
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error logic: %s\n",error_string);
#ifdef LOGS
if (!generate) log_error_detail("error logic:"); end_log_file();
#endif
exit(1);
}
checkCudaErrors( cudaPeekAtLastError() );
}
#ifdef LOGS
if (!generate) end_iteration();
#endif
kernel_time = mysecond()-kernel_time;
//=====================================================================
// COMPARE OUTPUTS / WRITE GOLD
//=====================================================================
if (generate){
cuda_error = cudaMemcpy( fv_cpu, d_fv_gpu[0], dim_cpu.space_mem, cudaMemcpyDeviceToHost);
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error download fv_cpu\n");
exit(1);
}
writeGold(dim_cpu, output_gold, &fv_cpu);
} else { // Check gold
//int ea = 0;
int thread_error = 0;
int kernel_errors = 0;
char error_detail[300];
timestamp = mysecond();
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
//=====================================================================
// GPU MEMORY COPY BACK
//=====================================================================
cuda_error = cudaMemcpy( fv_cpu, d_fv_gpu[streamIdx], dim_cpu.space_mem, cudaMemcpyDeviceToHost);
error_string = cudaGetErrorString(cuda_error);
if(strcmp(error_string, "no error") != 0) {
printf("error download fv_cpu\n");
#ifdef LOGS
if (!generate) log_error_detail("error download fv_cpu"); end_log_file();
#endif
exit(1);
}
// thread_error must be thread-private: each thread accumulates mismatches for
// its own elements and resets the counter after reporting inside the critical section.
#pragma omp parallel for firstprivate(thread_error)
for(i=0; i<dim_cpu.space_elem; i=i+1) {
if(fv_cpu_GOLD[i].v != fv_cpu[i].v) {
thread_error++;
}
if(fv_cpu_GOLD[i].x != fv_cpu[i].x) {
thread_error++;
}
if(fv_cpu_GOLD[i].y != fv_cpu[i].y) {
thread_error++;
}
if(fv_cpu_GOLD[i].z != fv_cpu[i].z) {
thread_error++;
}
if (thread_error > 0) {
#pragma omp critical
{
kernel_errors++;
snprintf(error_detail, 300, "stream: %d, p: [%d], ea: %d, v_r: %1.16e, v_e: %1.16e, x_r: %1.16e, x_e: %1.16e, y_r: %1.16e, y_e: %1.16e, z_r: %1.16e, z_e: %1.16e\n", streamIdx, \
i, thread_error, fv_cpu[i].v, fv_cpu_GOLD[i].v, fv_cpu[i].x, fv_cpu_GOLD[i].x, fv_cpu[i].y, fv_cpu_GOLD[i].y, fv_cpu[i].z, fv_cpu_GOLD[i].z);
if (kernel_errors<25) printf("ERROR: %s\n", error_detail);
if (kernel_errors>=25) printf("!");
#ifdef LOGS
if (!generate) log_error_detail(error_detail);
#endif
thread_error = 0;
}
}
}
}
#ifdef LOGS
if (!generate) log_error_count(kernel_errors);
#endif
if (verbose) printf("[Iteration #%i] Gold check time: %f\n", loop, mysecond() - timestamp);
}
//================= PERF
// iterate for each neighbor of a box (number_nn)
double flop = number_nn;
// the innermost loop iterates NUMBER_PAR_PER_BOX times
flop *= NUMBER_PAR_PER_BOX;
// each innermost iteration performs 46 floating-point operations plus 2 exp() calls
flop *= 46;
flop *= nstreams;
double flops = (double)flop/kernel_time;
double outputpersec = (double)dim_cpu.space_elem * 4 * nstreams / kernel_time;
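// Illustrative numbers only (hypothetical, not measured): with
// number_nn = 1e6 neighbor interactions, NUMBER_PAR_PER_BOX = 100,
// nstreams = 1 and kernel_time = 0.5 s:
//   flop  = 1e6 * 100 * 46 * 1 = 4.6e9
//   flops = 4.6e9 / 0.5        = 9.2e9  (i.e. ~9.2 GFLOPS in the print below)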
if (verbose) printf("[Iteration #%i] BOXES:%d BLOCK:%d OUTPUT/S:%.2f FLOPS:%.2f (GFLOPS:%.2f)\n", loop, dim_cpu.boxes1d_arg, NUMBER_THREADS, outputpersec, flops, flops/1000000000);
if (verbose) printf("[Iteration #%i] kernel_time:%f\n", loop, kernel_time);
//=====================
printf(".");
fflush(stdout);
//=====================================================================
// GPU MEMORY DEALLOCATION
//=====================================================================
for (streamIdx = 0; streamIdx < nstreams; streamIdx++) {
cudaFree(d_rv_gpu[streamIdx]);
cudaFree(d_qv_gpu[streamIdx]);
cudaFree(d_fv_gpu[streamIdx]);
cudaFree(d_box_gpu[streamIdx]);
}
//=====================================================================
// SYSTEM MEMORY DEALLOCATION
//=====================================================================
free(fv_cpu);
if (verbose) printf("[Iteration #%i] Elapsed time: %.4fs\n", loop, mysecond()-globaltimer);
}
if (!generate) free(fv_cpu_GOLD);
free(rv_cpu);
free(qv_cpu);
free(box_cpu);
printf("\n");
#ifdef LOGS
if (!generate) end_log_file();
#endif
return 0;
}
|
f7b6323e065a5fd52591d1308de829420e44d14c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Kernel.h"
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "IPTools.h"
#include <algorithm>
#include "Common.h"
#ifndef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef max
#define max(a,b) (((a) > (b)) ? (a) : (b))
#endif
// Texture reference for 2D float texture
texture<float, 2, hipReadModeElementType> tex_rot_imgA;
//-----------------------------------------------------------------------------
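// getInterpolatedPixelA: bilinear interpolation at (x, y). With fractional
// offsets xf = x - xbase and yf = y - ybase, and the four surrounding pixels
// LL, LR, UL, UR (clamped at the image borders), the sampled value is
//   lower = LL + xf*(LR - LL),  upper = UL + xf*(UR - UL),
//   result = lower + yf*(upper - lower)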
__device__ double getInterpolatedPixelA(double x, double y, int iWidth, int iHeight, float* pixels)
{
int xbase = (int)x;
int ybase = (int)y;
xbase = xbase < iWidth ? xbase : iWidth - 1;// min(xbase, iWidth - 1);
ybase = ybase < iHeight ? ybase : iHeight - 1;// min(ybase, iHeight - 1);
//if (xbase >= iWidth || ybase >= iHeight)
// return 1;
double xFraction = x - xbase;
double yFraction = y - ybase;
int offset = ybase * iWidth + xbase;
double lowerLeft = pixels[offset];
double lowerRight = xbase == iWidth - 1 ? pixels[offset] : pixels[offset + 1];
double upperRight = (xbase == iWidth - 1 || ybase == iHeight - 1) ? pixels[offset] : pixels[offset + iWidth + 1];
double upperLeft = ybase == iHeight - 1 ? pixels[offset] : pixels[offset + iWidth];
double upperAverage = upperLeft;
if (xFraction != 0.0)
upperAverage += xFraction * (upperRight - upperLeft);
double lowerAverage = lowerLeft;
if (xFraction != 0.0)
lowerAverage += xFraction * (lowerRight - lowerLeft);
if (yFraction == 0.0)
return lowerAverage;
else
return lowerAverage + yFraction * (upperAverage - lowerAverage);
}
//-------------------------------------------------------------------
__global__ void RotateImageA_tex_kernel(float *outputImagekernel, int inWidth, int inHeight, int outWidth, int outHeight, double theta)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float rads = (theta) * 3.1415926 / 180.0;
float u = (float)col - (float)outWidth / 2;
float v = (float)row - (float)outHeight / 2;
float tu = u * cosf(rads) - v * sinf(rads);
float tv = v * cosf(rads) + u * sinf(rads);
tu /= (float)inWidth;
tv /= (float)inHeight;
if (col < outWidth && row < outHeight)
outputImagekernel[row*outWidth + col] = tex2D(tex_rot_imgA, tu + 0.5f, tv + 0.5f);
}
//-------------------------------------------------------------------
__global__ void RotateImageA_kernel(float *inImagekernel, float *outputImagekernel, int inWidth, int inHeight, int outWidth, int outHeight, double theta, double fMag)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float rads = (theta) * 3.1415926 / 180.0;
float u = (float)col - (float)outWidth / 2;
float v = (float)row - (float)outHeight / 2;
float tu = u * cosf(rads) - v * sinf(rads);
float tv = v * cosf(rads) + u * sinf(rads);
//tu /= (float)inWidth;
//tv /= (float)inHeight;
tu *= fMag;
tv *= fMag;
tu = (float)(tu + (float)inWidth / 2.0f);
tv = (float)(tv + (float)inHeight / 2.0f);
if (col < outWidth && row < outHeight && tu >= 0 && tu < inWidth && tv >= 0 && tv < inHeight)
{
outputImagekernel[row*outWidth + col] = inImagekernel[((int)tv * inWidth) + (int)tu];//tex2D(tex_rot_imgA, tu + inWidth/2 , tv + inHeight/2);
//outputImagekernel[row*outWidth + col] = getInterpolatedPixelA(tu, tv, inWidth, inHeight, inImagekernel);
}
}
//---------------------------
__global__ void RotateImageA_usingLUT_kernel(float *inImagekernel, float *outputImagekernel, int inWidth, int inHeight, int outWidth, int outHeight, double theta, int *pLut)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int offset = row * outWidth + col;
if (col < outWidth && row < outHeight )
{
if ( pLut[offset] >= 0 )
outputImagekernel[offset] = inImagekernel[pLut[offset]];
}
}
__global__ void SmoothBorder_kernel(float *inImagekernel, float *outputImagekernel, unsigned char *pMaskData, int iWidth, int iHeight, int iWin)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int iOffset = row * iWidth + col;
outputImagekernel[iOffset] = inImagekernel[iOffset] * inImagekernel[iOffset];
//if (pMaskData[iOffset] > 0)
//{
// int count = 0;
// double fSum = 0.0;
// for (int j = -iWin; j <= iWin; j++)
// {
// for (int i = -iWin; i <= iWin; i++)
// {
// int iNewX = col + i;
// int iNewY = row + j;
// if (iNewX >= 0 && iNewX < iWidth && iNewY >= 0 && iNewY < iHeight)
// {
// fSum += (double)inImagekernel[iNewY*iWidth + iNewX];
// count++;
// }
// }
// }
// if (count > 0)
// {
// outputImagekernel[iOffset] = (float)(fSum / (double)count);
// }
//}
}
//-------------------------------------------------------------------
__global__ void TDS_AddA_kernel(float *inFrameData, float *outputImageData, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int offset = row * inWidth + col;
if ( col < inWidth && row < inHeight)
outputImageData[offset] += inFrameData[offset];
//__syncthreads();
}
//-------------------------------------------------------------------
__global__ void TDS_AddA_conditional_kernel(float *inFrameData, float *outputImageData, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float pVals[2];
int offset = row * inWidth + col;
if (col < inWidth && row < inHeight)
{
float fVal = inFrameData[offset];
if ( fVal > 0.0f)
outputImageData[offset] += fVal;
}
}
//-------------------------------------------------------------------
__global__ void SetValues_kernel(float *inFrameData, float iVal, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < inWidth && row < inHeight)
inFrameData[row*inWidth + col] = iVal;
}
//-------------------------------------------------------------------
__global__ void Memcpy_us_to_float__kernel(unsigned short *pInData, float *pOutData, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < inWidth && row < inHeight)
pOutData[row*inWidth + col] = pInData[row*inWidth + col];
}
//-------------------------------------------------------------------
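// Memcpy_any_to_float__kernel: converts one frame of raw pixels to float.
// pixType selects the source format used throughout this file:
//   1 = unsigned 8-bit, 2 = unsigned 16-bit, 4 = unsigned 32-bit, 6 = 32-bit float
// and pInData is addressed with the matching byte stride per pixel.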
__global__ void Memcpy_any_to_float__kernel(unsigned char *pInData, float *pOutData, int inWidth, int inHeight, int pixType)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < inWidth && row < inHeight)
{
switch (pixType)
{
case 1:
pOutData[row*inWidth + col] = (float)*((unsigned char*)&pInData[row*inWidth + col]);
break;
case 2:
pOutData[row*inWidth + col] = (float)*((unsigned short*)&pInData[row*inWidth*2 + col*2]);
break;
case 4:
pOutData[row*inWidth + col] = (float)*((unsigned int*)&pInData[row*inWidth*4 + col*4]);
break;
case 6:
pOutData[row*inWidth + col] = (float)*((float*)&pInData[row*inWidth*4 + col*4]);
break;
};
}
}
//----------------------------------------------------------------------------
unsigned char *GetMaskDataAfterRotation(int iW, int iH, double theta, double fMag, int &iNewW, int &iNewH)
{
unsigned char *pMaskData = NULL;
//int iNewW=0, iNewH = 0;
FindDimensionAfterRotation(iW, iH, theta, fMag, iNewW, iNewH);
if (iNewW <= 0 || iNewH <= 0) return NULL;
pMaskData = new unsigned char[iNewW*iNewH];
unsigned char *pTempData = new unsigned char[iW*iH];
unsigned char *pRotatedTempData = new unsigned char[iNewW*iNewH];
memset(pRotatedTempData, 0, iNewW*iNewH);
for (long i = 0; i < iW*iH; i++)
pTempData[i] = 1;
unsigned char *pMaks_rot_eroded = new unsigned char[iNewW*iNewH];
memset(pMaks_rot_eroded, 0, iNewW*iNewH);
IPTools<unsigned char>::RotateImage_cpu(pTempData, iW, iH, pRotatedTempData, iNewW, iNewH, theta, 0);
IPTools<unsigned char>::DoErosion(pRotatedTempData, pMaks_rot_eroded, iNewW, iNewH, 5);
float *pDisData = new float[iNewW*iNewH];
IPTools<unsigned char>::GetDistanceMap(pMaks_rot_eroded, iNewW, iNewH, 0, pDisData);
//WriteRawData<unsigned char>("c:\\Temp\\Dist.raw", pMaks_rot_eroded, iNewW, iNewH);
memset(pMaskData, 0, 1 * iNewW*iNewH);
for (long i = 0; i < iNewW*iNewH; i++)
{
float fVal = pDisData[i];
if (fVal > 0.0f && fVal < 6.0f)
pMaskData[i] = 1;
}
delete[] pTempData;
delete[] pRotatedTempData;
delete[] pMaks_rot_eroded;
delete[] pDisData;
return pMaskData;
}
//--------------------------------------------------------------------
void RotateImage_GetLUT_cpu(int iW, int iH, int *pLut, int iOutWidth, int iOutHeight, double theta, double fMagnification)
{
for (long i = 0; i < iOutWidth * iOutHeight; i++) pLut[i] = -1;
long iInFrameSize = iW * iH;
double fMag = 1.0 / fMagnification;
float rads = (theta) * 3.1415926 / 180.0;
float cs = cos(rads); // precalculate these values
float ss = sin(rads);
float xcenterOut = (float)(iOutWidth) / 2.0; // use float here!
float ycenterOut = (float)(iOutHeight) / 2.0;
float xcenterIn = (float)iW / 2.0f;
float ycenterIn = (float)iH / 2.0f;
for (int row = 0; row < iOutHeight; row++)
{
for (int col = 0; col < iOutWidth; col++)
{
float u = (float)col - xcenterOut;
float v = (float)row - ycenterOut;
float tu = u * cs - v * ss;
float tv = v * cs + u * ss;
tu *= fMag;
tv *= fMag;
tu += xcenterIn;
tv += ycenterIn;
//tu += (iOutWidth - iW) / 2;
//tu += (iOutHeight - iH) / 2;
if (tu >= 0 && tu < iW && tv >= 0 && tv < iH)
{
//pOutData[row*iOutWidth + col] = getInterpolatedPixel_TF(tu, tv, iW, iH, pData);
long offset = (int)tv*iW + (int)tu;
// pLut[offset] = row*iOutWidth + col;
pLut[row*iOutWidth + col] = offset;
}
}
}
}
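//--------------------------------------------------------------------
// Minimal CPU-side sketch (illustrative only; ApplyRotationLUT_cpu is a
// hypothetical helper, not part of the original API) of how the LUT built
// above is consumed: pLut[outIdx] holds the source offset into the input
// frame, or -1 when the rotated coordinate falls outside the input.
static void ApplyRotationLUT_cpu(const float *pIn, float *pOut, const int *pLut, int iOutWidth, int iOutHeight)
{
for (long i = 0; i < (long)iOutWidth * (long)iOutHeight; i++)
pOut[i] = (pLut[i] >= 0) ? pIn[pLut[i]] : 0.0f;
}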
//--------------------------------------------------------------------------------
//hipError_t RotateAddImage_Cuda(unsigned short* pInData, int inWidth, int inHeight, int iNumFrames, unsigned short *pOutData, int outWidth, int outHeight, double theta, double fScale, double fMag)
//{
//
// hipArray *cuArray_img;
// float *d_OutData = 0;
// unsigned short *d_InData_us = 0;
// float *d_InData = 0;
// float *d_RotatedFrameData = 0;
// unsigned char *d_pMaskData = 0;
// float *pTempInData = 0;
// float *pTempOutData = 0;
// int *pLut = 0;
// int *d_pLut = 0;
// unsigned char *pMaskData = NULL;
//
// int iFrameSize = inWidth * inHeight;
// int iOutFrameSize = outWidth * outHeight;
//
// pTempInData = new float[iFrameSize];
// pTempOutData = new float[iOutFrameSize];
//
//
// hipError_t cudaStatus = hipErrorInvalidValue;
//
// int iRotWidth, int iRotHeight;
// FindDimensionAfterRotation(inWidth, inHeight, theta, fMag, iRotWidth, iRotHeight);
//
// int iRotatedFrameSize = iRotWidth * iRotHeight;
//
// pLut = new int[iRotatedFrameSize];
// RotateImage_GetLUT_cpu(inWidth, inHeight, pLut, iRotWidth, iRotHeight, theta, fMag);
//
// //Get mask data to cover arround the edges after rotation
// int iNewMaskW = 0, iNewMaskH = 0;
// pMaskData = GetMaskDataAfterRotation(inWidth, inHeight, theta, fMag, iNewMaskW, iNewMaskH);
// if (iNewMaskW != iRotWidth || iNewMaskH != iRotHeight)
// {
// printf("dimension mismatch when creating mask\n");
// goto Error;
// }
// //WriteRawData<unsigned char>("c:\\Temp\\MaskData.raw", pMaskData, iNewMaskW, iNewMaskH);
//
// printf("Rotaed dim %d %d %d %d\n", iRotWidth, iRotHeight, outWidth, outHeight);
// // Choose which GPU to run on, change this on a multi-GPU system.
// //cudaStatus = hipSetDevice(0);
// //if (cudaStatus != hipSuccess) {
// // fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// // goto Error;
// //}
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// //imput image text array
//// hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
//// hipMallocArray(&cuArray_img, &channelDesc, inWidth, inHeight);
////// hipMemcpyToArray(cuArray_img, 0, 0, pInData, iFrameSize * sizeof(unsigned short), hipMemcpyHostToDevice);
//// // Set texture parameters
//// tex_rot_imgA.addressMode[0] = hipAddressModeBorder;// ModeWrap;
//// tex_rot_imgA.addressMode[1] = hipAddressModeBorder;
//// tex_rot_imgA.filterMode = hipFilterModeLinear;
//// tex_rot_imgA.normalized = true; // access with normalized texture coordinates
//// hipBindTextureToArray(tex_rot_imgA, cuArray_img, channelDesc);
//
//
//
// cudaStatus = hipMalloc((void**)&d_OutData, iOutFrameSize * sizeof(float));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&d_InData_us, iFrameSize * sizeof(unsigned short));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&d_InData, iFrameSize * sizeof(float));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&d_RotatedFrameData, iRotatedFrameSize * sizeof(float)*2);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&d_pLut, iRotatedFrameSize * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
// hipMemcpy(d_pLut, pLut, iRotatedFrameSize * sizeof(int), hipMemcpyHostToDevice);
//
// cudaStatus = hipMalloc((void**)&d_pMaskData, iRotatedFrameSize * sizeof(unsigned char) );
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//// hipMemcpy(d_pMaskData, pMaskData, iRotatedFrameSize * sizeof(unsigned char), hipMemcpyHostToDevice);
//
// int TILE_SIZE_X = 16;
// int TILE_SIZE_Y = 16;
// dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y);
//
// dim3 dimGrid((int)ceil((float)iRotWidth / (float)TILE_SIZE_X), (int)ceil((float)iRotHeight / (float)TILE_SIZE_Y));
//
// dim3 dimGrid_in((int)ceil((float)inWidth / (float)TILE_SIZE_X), (int)ceil((float)inHeight / (float)TILE_SIZE_Y));
//
// dim3 dimGrid_out((int)ceil((float)outWidth / (float)TILE_SIZE_X), (int)ceil((float)outHeight / (float)TILE_SIZE_Y));
//
// double fShiftRow = 0.0;
// bool bReversed = false;
// int iCurIndex = 0;
// int iPrevIndex = -1;
// SetValues_kernel << <dimGrid_out, dimBlock >> > (d_OutData, 0, outWidth, outHeight);
//
//
// for (int iZ = 0; iZ < iNumFrames /*&&iZ<30*/ ; iZ++)
// {
// iCurIndex = (int)fShiftRow*outWidth;
// if (iPrevIndex != iCurIndex)
// {
//
//
// int iZIndex = bReversed ? iZ : iNumFrames - 1 - iZ;
//
// //////////////////
// //unsigned short *pInDataRef = &pInData[iZIndex * iFrameSize];
// //for (int k = 0; k < iFrameSize; k++)
// // pTempInData[k] = (float)pInDataRef[k];
// //hipMemcpy(d_InData, pTempInData, iFrameSize * sizeof(float), hipMemcpyHostToDevice);
// /////////////////////
//
// hipMemcpy(d_InData_us, &pInData[iZIndex * iFrameSize], iFrameSize * sizeof(unsigned short), hipMemcpyHostToDevice);
// Memcpy_us_to_float__kernel << <dimGrid_in, dimBlock>> > (d_InData_us, d_InData, inWidth, inHeight);
//
//
// SetValues_kernel << <dimGrid, dimBlock >> > (d_RotatedFrameData, 0, iRotWidth, iRotHeight);
// RotateImageA_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, 1.0 / fMag);
//
//
//
// // RotateImageA_usingLUT_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, d_pLut);
// //SmoothBorder_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_RotatedFrameData[iRotatedFrameSize], d_pMaskData, iRotWidth, iRotHeight, 3);
// TDS_AddA_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
//
// hipDeviceSynchronize();
// }
// fShiftRow += fScale;
// if (fShiftRow >= outHeight) break;
// iPrevIndex = iCurIndex;
// }
//
//
// // Check for any errors launching the kernel
// cudaStatus = hipGetLastError();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
// goto Error;
// }
//
// // hipDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// //cudaStatus = hipMemcpy(pOutData, d_OutData, iOutFrameSize * sizeof(unsigned short), hipMemcpyDeviceToHost);
// cudaStatus = hipMemcpy(pTempOutData, d_OutData, iOutFrameSize * sizeof(float), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
// for (int k = 0; k < iOutFrameSize; k++)
// pOutData[k] = (unsigned short)pTempOutData[k];
//
//Error:
// //hipFreeArray(cuArray_img);
// if ( d_InData!=NULL )hipFree(d_InData);
// if (d_OutData!=NULL) hipFree(d_OutData);
// if (d_RotatedFrameData!=NULL) hipFree(d_RotatedFrameData);
// if (d_pMaskData!=NULL)hipFree(d_pMaskData);
// if (d_InData_us != NULL)hipFree(d_InData_us);
// if (d_pLut != NULL) hipFree(d_pLut);
// delete[] pMaskData;
// delete[] pTempInData;
// delete[] pTempOutData;
// delete[] pLut;
// return cudaStatus;
//}
//--------------------------------------------------------------------------------------------------------------------------
hipError_t RotateAddImage_Cuda(unsigned char* pInData, int inWidth, int inHeight, int iNumFrames, unsigned char *pOutData, int outWidth, int outHeight, int pixType, double theta, double fScale, double fMag, bool bReversed)
{
hipArray *cuArray_img;
float *d_OutData = 0;
unsigned char *d_InData_any = 0;
float *d_InData = 0;
float *d_RotatedFrameData = 0;
unsigned char *d_pMaskData = 0;
float *pTempInData = 0;
float *pTempOutData = 0;
int *pLut = 0;
int *d_pLut = 0;
unsigned char *pMaskData = NULL;
int iFrameSize = inWidth * inHeight;
int iOutFrameSize = outWidth * outHeight;
pTempInData = new float[iFrameSize];
pTempOutData = new float[iOutFrameSize];
int pixSize = 2; //u16
switch (pixType)
{
case 1: pixSize = 1; break; //u8
case 4: case 6: pixSize = 4; break; //u32 and float
};
hipError_t cudaStatus = hipErrorInvalidValue;
int iRotWidth, iRotHeight;
FindDimensionAfterRotation(inWidth, inHeight, theta, fMag, iRotWidth, iRotHeight);
int iRotatedFrameSize = iRotWidth * iRotHeight;
pLut = new int[iRotatedFrameSize];
RotateImage_GetLUT_cpu(inWidth, inHeight, pLut, iRotWidth, iRotHeight, theta, fMag);
//Get mask data to cover around the edges after rotation
int iNewMaskW = 0, iNewMaskH = 0;
pMaskData = GetMaskDataAfterRotation(inWidth, inHeight, theta, fMag, iNewMaskW, iNewMaskH);
if (iNewMaskW != iRotWidth || iNewMaskH != iRotHeight)
{
printf("dimension mismatch when creating mask\n");
goto Error;
}
//WriteRawData<unsigned char>("c:\\Temp\\MaskData.raw", pMaskData, iNewMaskW, iNewMaskH);
printf("Rotaed dim %d %d %d %d\n", iRotWidth, iRotHeight, outWidth, outHeight);
cudaStatus = hipMalloc((void**)&d_OutData, iOutFrameSize * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_InData_any, iFrameSize * pixSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_InData, iFrameSize * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_RotatedFrameData, iRotatedFrameSize * sizeof(float) * 2);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_pLut, iRotatedFrameSize * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
hipMemcpy(d_pLut, pLut, iRotatedFrameSize * sizeof(int), hipMemcpyHostToDevice);
cudaStatus = hipMalloc((void**)&d_pMaskData, iRotatedFrameSize * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// hipMemcpy(d_pMaskData, pMaskData, iRotatedFrameSize * sizeof(unsigned char), hipMemcpyHostToDevice);
int TILE_SIZE_X = 16;
int TILE_SIZE_Y = 16;
dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y);
dim3 dimGrid((int)ceil((float)iRotWidth / (float)TILE_SIZE_X), (int)ceil((float)iRotHeight / (float)TILE_SIZE_Y));
dim3 dimGrid_in((int)ceil((float)inWidth / (float)TILE_SIZE_X), (int)ceil((float)inHeight / (float)TILE_SIZE_Y));
dim3 dimGrid_out((int)ceil((float)outWidth / (float)TILE_SIZE_X), (int)ceil((float)outHeight / (float)TILE_SIZE_Y));
float fShiftRow = 0.0;
//bool bReversed = false;
int iCurIndex = 0;
int iPrevIndex = -1;
SetValues_kernel << <dimGrid_out, dimBlock >> > (d_OutData, 0, outWidth, outHeight);
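// Frame accumulation loop: each input frame iZ is converted to float,
// rotated/magnified into d_RotatedFrameData, then added into d_OutData
// starting at row (int)fShiftRow, which advances by fScale rows per frame.
// Frames that map to the same output row offset as the previous frame are skipped.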
for (int iZ = 0; iZ < iNumFrames /*&&iZ<30*/; iZ++)
{
iCurIndex = (int)fShiftRow*outWidth;
if (iPrevIndex != iCurIndex)
{
int iZIndex = bReversed ? iZ : iNumFrames - 1 - iZ;
//////////////////
//unsigned short *pInDataRef = &pInData[iZIndex * iFrameSize];
//for (int k = 0; k < iFrameSize; k++)
// pTempInData[k] = (float)pInDataRef[k];
//hipMemcpy(d_InData, pTempInData, iFrameSize * sizeof(float), hipMemcpyHostToDevice);
/////////////////////
hipMemcpy(d_InData_any, &pInData[iZIndex * iFrameSize * pixSize], iFrameSize * pixSize, hipMemcpyHostToDevice);
Memcpy_any_to_float__kernel << <dimGrid_in, dimBlock >> > (d_InData_any, d_InData, inWidth, inHeight, pixType);
//Memcpy_us_to_float__kernel << <dimGrid_in, dimBlock >> > ((unsigned short*)d_InData_any, d_InData, inWidth, inHeight);
SetValues_kernel << <dimGrid, dimBlock >> > (d_RotatedFrameData, 0, iRotWidth, iRotHeight);
RotateImageA_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, 1.0 / fMag);
//RotateImageA_usingLUT_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, d_pLut);
TDS_AddA_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
hipDeviceSynchronize();
}
fShiftRow += fScale;
if (fShiftRow >= outHeight) break;
iPrevIndex = iCurIndex;
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
//cudaStatus = hipMemcpy(pOutData, d_OutData, iOutFrameSize * sizeof(unsigned short), hipMemcpyDeviceToHost);
cudaStatus = hipMemcpy(pTempOutData, d_OutData, iOutFrameSize * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// convert the float accumulation buffer back to the caller's pixel format
switch (pixType)
{
case 1: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned char*)&pOutData[k]) = (unsigned char)pTempOutData[p]; break;
case 2: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned short*)&pOutData[k]) = (unsigned short)pTempOutData[p]; break;
case 4: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned int*)&pOutData[k]) = (unsigned int)pTempOutData[p]; break;
case 6: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((float*)&pOutData[k]) = pTempOutData[p]; break;
};
Error:
//hipFreeArray(cuArray_img);
if (d_InData != NULL)hipFree(d_InData);
if (d_OutData != NULL) hipFree(d_OutData);
if (d_RotatedFrameData != NULL) hipFree(d_RotatedFrameData);
if (d_pMaskData != NULL)hipFree(d_pMaskData);
if (d_InData_any != NULL)hipFree(d_InData_any);
if (d_pLut != NULL) hipFree(d_pLut);
delete[] pMaskData;
delete[] pTempInData;
delete[] pTempOutData;
delete[] pLut;
return cudaStatus;
}
//--------------------------------------------------------------------------------
hipError_t RotateAddImage_lut_Cuda(unsigned char* pInData, int inWidth, int inHeight, int iNumFrames, unsigned char *pOutData, int outWidth, int outHeight, int pixType, double theta, double fScale, double fMag, bool bReversed)
{
float *d_OutData = 0;
unsigned char *d_InData_any = 0;
float *d_InData = 0;
float *d_RotatedFrameData = 0;
float *pTempInData = 0;
float *pTempOutData = 0;
int *pLut = 0;
int *d_pLut = 0;
unsigned char *pMaskData = NULL;
int iFrameSize = inWidth * inHeight;
int iOutFrameSize = outWidth * outHeight;
pTempInData = new float[iFrameSize];
pTempOutData = new float[iOutFrameSize];
int pixSize = 2; //u16
switch (pixType)
{
case 1: pixSize = 1; break; //u8
case 4: case 6: pixSize = 4; break; //u32 and float
};
hipError_t cudaStatus = hipErrorInvalidValue;
int iRotWidth, iRotHeight;
FindDimensionAfterRotation(inWidth, inHeight, theta, fMag, iRotWidth, iRotHeight);
int iRotatedFrameSize = iRotWidth * iRotHeight;
pLut = new int[iRotatedFrameSize];
RotateImage_GetLUT_cpu(inWidth, inHeight, pLut, iRotWidth, iRotHeight, theta, fMag);
printf("Rotaed dim %d %d %d %d\n", iRotWidth, iRotHeight, outWidth, outHeight);
cudaStatus = hipMalloc((void**)&d_OutData, iOutFrameSize * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_InData_any, iFrameSize * pixSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_InData, iFrameSize * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_RotatedFrameData, iRotatedFrameSize * sizeof(float) * 2);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_pLut, iRotatedFrameSize * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
hipMemcpy(d_pLut, pLut, iRotatedFrameSize * sizeof(int), hipMemcpyHostToDevice);
int TILE_SIZE_X = 32;
int TILE_SIZE_Y = 8;
dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y);
dim3 dimGrid((int)ceil((float)iRotWidth / (float)TILE_SIZE_X), (int)ceil((float)iRotHeight / (float)TILE_SIZE_Y));
dim3 dimGrid_in((int)ceil((float)inWidth / (float)TILE_SIZE_X), (int)ceil((float)inHeight / (float)TILE_SIZE_Y));
float fShiftRow = 0.0f;
//bool bReversed = false;
//bReversed = false;
int iCurIndex = 0;
int iPrevIndex = -1;
for (int iZ = 0; iZ < iNumFrames; iZ++)
{
iCurIndex = (int)fShiftRow*outWidth;
if (iPrevIndex != iCurIndex)
{
int iZIndex = bReversed ? iZ : iNumFrames - 1 - iZ;
//////////////////
//unsigned short *pInDataRef = &pInData[iZIndex * iFrameSize];
//for (int k = 0; k < iFrameSize; k++)
// pTempInData[k] = (float)pInDataRef[k];
//hipMemcpy(d_InData, pTempInData, iFrameSize * sizeof(float), hipMemcpyHostToDevice);
/////////////////////
hipMemcpy(d_InData_any, &pInData[iZIndex * iFrameSize * pixSize], iFrameSize * pixSize, hipMemcpyHostToDevice);
Memcpy_any_to_float__kernel << <dimGrid_in, dimBlock >> > (d_InData_any, d_InData, inWidth, inHeight, pixType);
//Memcpy_us_to_float__kernel << <dimGrid_in, dimBlock >> > ((unsigned short*)d_InData_any, d_InData, inWidth, inHeight);
SetValues_kernel << <dimGrid, dimBlock >> > (d_RotatedFrameData, 0, iRotWidth, iRotHeight);
//RotateImageA_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, 1.0 / fMag);
RotateImageA_usingLUT_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, d_pLut);
// TDS_AddA_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
TDS_AddA_conditional_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
hipDeviceSynchronize();
}
fShiftRow += fScale;
iPrevIndex = iCurIndex;
if (fShiftRow >= outHeight) break;
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
//cudaStatus = hipMemcpy(pOutData, d_OutData, iOutFrameSize * sizeof(unsigned short), hipMemcpyDeviceToHost);
cudaStatus = hipMemcpy(pTempOutData, d_OutData, iOutFrameSize * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
/////////////////////
//FILE *out; fopen_s(&out, "c:\\temp\\testOut.raw", "wb"); fwrite(pTempOutData, 4, iOutFrameSize, out); fclose(out);
////////////////////
// convert the float accumulation buffer back to the caller's pixel format
switch (pixType)
{
case 1: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned char*)&pOutData[k]) = (unsigned char)pTempOutData[p]; break;
case 2: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned short*)&pOutData[k]) = (unsigned short)pTempOutData[p]; break;
case 4: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned int*)&pOutData[k]) = (unsigned int)pTempOutData[p]; break;
case 6: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((float*)&pOutData[k]) = pTempOutData[p]; break;
};
Error:
//hipFreeArray(cuArray_img);
if (d_InData != NULL)hipFree(d_InData);
if (d_OutData != NULL) hipFree(d_OutData);
if (d_RotatedFrameData != NULL) hipFree(d_RotatedFrameData);
if (d_InData_any != NULL)hipFree(d_InData_any);
if (d_pLut != NULL) hipFree(d_pLut);
delete[] pMaskData;
delete[] pTempInData;
delete[] pTempOutData;
delete[] pLut;
return cudaStatus;
}
| f7b6323e065a5fd52591d1308de829420e44d14c.cu | #include "Kernel.h"
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "IPTools.h"
#include <math.h>
#include <algorithm>
#include "Common.h"
#ifndef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef max
#define max(a,b) (((a) > (b)) ? (a) : (b))
#endif
// Texture reference for 2D float texture
texture<float, 2, cudaReadModeElementType> tex_rot_imgA;
//-----------------------------------------------------------------------------
__device__ double getInterpolatedPixelA(double x, double y, int iWidth, int iHeight, float* pixels)
{
int xbase = (int)x;
int ybase = (int)y;
xbase = xbase < iWidth ? xbase : iWidth - 1;// min(xbase, iWidth - 1);
ybase = ybase < iHeight ? ybase : iHeight - 1;// min(ybase, iHeight - 1);
//if (xbase >= iWidth || ybase >= iHeight)
// return 1;
double xFraction = x - xbase;
double yFraction = y - ybase;
int offset = ybase * iWidth + xbase;
double lowerLeft = pixels[offset];
double lowerRight = xbase == iWidth - 1 ? pixels[offset] : pixels[offset + 1];
double upperRight = (xbase == iWidth - 1 || ybase == iHeight - 1) ? pixels[offset] : pixels[offset + iWidth + 1];
double upperLeft = ybase == iHeight - 1 ? pixels[offset] : pixels[offset + iWidth];
double upperAverage = upperLeft;
if (xFraction != 0.0)
upperAverage += xFraction * (upperRight - upperLeft);
double lowerAverage = lowerLeft;
if (xFraction != 0.0)
lowerAverage += xFraction * (lowerRight - lowerLeft);
if (yFraction == 0.0)
return lowerAverage;
else
return lowerAverage + yFraction * (upperAverage - lowerAverage);
}
//-------------------------------------------------------------------
__global__ void RotateImageA_tex_kernel(float *outputImagekernel, int inWidth, int inHeight, int outWidth, int outHeight, double theta)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float rads = (theta) * 3.1415926 / 180.0;
float u = (float)col - (float)outWidth / 2;
float v = (float)row - (float)outHeight / 2;
float tu = u * cosf(rads) - v * sinf(rads);
float tv = v * cosf(rads) + u * sinf(rads);
tu /= (float)inWidth;
tv /= (float)inHeight;
if (col < outWidth && row < outHeight)
outputImagekernel[row*outWidth + col] = tex2D(tex_rot_imgA, tu + 0.5f, tv + 0.5f);
}
//-------------------------------------------------------------------
__global__ void RotateImageA_kernel(float *inImagekernel, float *outputImagekernel, int inWidth, int inHeight, int outWidth, int outHeight, double theta, double fMag)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float rads = (theta) * 3.1415926 / 180.0;
float u = (float)col - (float)outWidth / 2;
float v = (float)row - (float)outHeight / 2;
float tu = u * cosf(rads) - v * sinf(rads);
float tv = v * cosf(rads) + u * sinf(rads);
//tu /= (float)inWidth;
//tv /= (float)inHeight;
tu *= fMag;
tv *= fMag;
tu = (float)(tu + (float)inWidth / 2.0f);
tv = (float)(tv + (float)inHeight / 2.0f);
if (col < outWidth && row < outHeight && tu >= 0 && tu < inWidth && tv >= 0 && tv < inHeight)
{
outputImagekernel[row*outWidth + col] = inImagekernel[((int)tv * inWidth) + (int)tu];//tex2D(tex_rot_imgA, tu + inWidth/2 , tv + inHeight/2);
//outputImagekernel[row*outWidth + col] = getInterpolatedPixelA(tu, tv, inWidth, inHeight, inImagekernel);
}
}
//---------------------------
__global__ void RotateImageA_usingLUT_kernel(float *inImagekernel, float *outputImagekernel, int inWidth, int inHeight, int outWidth, int outHeight, double theta, int *pLut)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int offset = row * outWidth + col;
if (col < outWidth && row < outHeight )
{
if ( pLut[offset] >= 0 )
outputImagekernel[offset] = inImagekernel[pLut[offset]];
}
}
__global__ void SmoothBorder_kernel(float *inImagekernel, float *outputImagekernel, unsigned char *pMaskData, int iWidth, int iHeight, int iWin)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int iOffset = row * iWidth + col;
outputImagekernel[iOffset] = inImagekernel[iOffset] * inImagekernel[iOffset];
//if (pMaskData[iOffset] > 0)
//{
// int count = 0;
// double fSum = 0.0;
// for (int j = -iWin; j <= iWin; j++)
// {
// for (int i = -iWin; i <= iWin; i++)
// {
// int iNewX = col + i;
// int iNewY = row + j;
// if (iNewX >= 0 && iNewX < iWidth && iNewY >= 0 && iNewY < iHeight)
// {
// fSum += (double)inImagekernel[iNewY*iWidth + iNewX];
// count++;
// }
// }
// }
// if (count > 0)
// {
// outputImagekernel[iOffset] = (float)(fSum / (double)count);
// }
//}
}
//-------------------------------------------------------------------
__global__ void TDS_AddA_kernel(float *inFrameData, float *outputImageData, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int offset = row * inWidth + col;
if ( col < inWidth && row < inHeight)
outputImageData[offset] += inFrameData[offset];
//__syncthreads();
}
//-------------------------------------------------------------------
__global__ void TDS_AddA_conditional_kernel(float *inFrameData, float *outputImageData, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float pVals[2];
int offset = row * inWidth + col;
if (col < inWidth && row < inHeight)
{
float fVal = inFrameData[offset];
if ( fVal > 0.0f)
outputImageData[offset] += fVal;
}
}
//-------------------------------------------------------------------
__global__ void SetValues_kernel(float *inFrameData, float iVal, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < inWidth && row < inHeight)
inFrameData[row*inWidth + col] = iVal;
}
//-------------------------------------------------------------------
__global__ void Memcpy_us_to_float__kernel(unsigned short *pInData, float *pOutData, int inWidth, int inHeight)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < inWidth && row < inHeight)
pOutData[row*inWidth + col] = pInData[row*inWidth + col];
}
//-------------------------------------------------------------------
__global__ void Memcpy_any_to_float__kernel(unsigned char *pInData, float *pOutData, int inWidth, int inHeight, int pixType)
{
// Set row and colum for thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < inWidth && row < inHeight)
{
switch (pixType)
{
case 1:
pOutData[row*inWidth + col] = (float)*((unsigned char*)&pInData[row*inWidth + col]);
break;
case 2:
pOutData[row*inWidth + col] = (float)*((unsigned short*)&pInData[row*inWidth*2 + col*2]);
break;
case 4:
pOutData[row*inWidth + col] = (float)*((unsigned int*)&pInData[row*inWidth*4 + col*4]);
break;
case 6:
pOutData[row*inWidth + col] = (float)*((float*)&pInData[row*inWidth*4 + col*4]);
break;
};
}
}
//----------------------------------------------------------------------------
unsigned char *GetMaskDataAfterRotation(int iW, int iH, double theta, double fMag, int &iNewW, int &iNewH)
{
unsigned char *pMaskData = NULL;
//int iNewW=0, iNewH = 0;
FindDimensionAfterRotation(iW, iH, theta, fMag, iNewW, iNewH);
if (iNewW <= 0 || iNewH <= 0) return NULL;
pMaskData = new unsigned char[iNewW*iNewH];
unsigned char *pTempData = new unsigned char[iW*iH];
unsigned char *pRotatedTempData = new unsigned char[iNewW*iNewH];
memset(pRotatedTempData, 0, iNewW*iNewH);
for (long i = 0; i < iW*iH; i++)
pTempData[i] = 1;
unsigned char *pMaks_rot_eroded = new unsigned char[iNewW*iNewH];
memset(pMaks_rot_eroded, 0, iNewW*iNewH);
IPTools<unsigned char>::RotateImage_cpu(pTempData, iW, iH, pRotatedTempData, iNewW, iNewH, theta, 0);
IPTools<unsigned char>::DoErosion(pRotatedTempData, pMaks_rot_eroded, iNewW, iNewH, 5);
float *pDisData = new float[iNewW*iNewH];
IPTools<unsigned char>::GetDistanceMap(pMaks_rot_eroded, iNewW, iNewH, 0, pDisData);
//WriteRawData<unsigned char>("c:\\Temp\\Dist.raw", pMaks_rot_eroded, iNewW, iNewH);
memset(pMaskData, 0, 1 * iNewW*iNewH);
for (long i = 0; i < iNewW*iNewH; i++)
{
float fVal = pDisData[i];
if (fVal > 0.0f && fVal < 6.0f)
pMaskData[i] = 1;
}
delete[] pTempData;
delete[] pRotatedTempData;
delete[] pMaks_rot_eroded;
delete[] pDisData;
return pMaskData;
}
//--------------------------------------------------------------------
void RotateImage_GetLUT_cpu(int iW, int iH, int *pLut, int iOutWidth, int iOutHeight, double theta, double fMagnification)
{
for (long i = 0; i < iOutWidth * iOutHeight; i++) pLut[i] = -1;
long iInFrameSize = iW * iH;
double fMag = 1.0 / fMagnification;
float rads = (theta) * 3.1415926 / 180.0;
float cs = cos(rads); // precalculate these values
float ss = sin(rads);
float xcenterOut = (float)(iOutWidth) / 2.0; // use float here!
float ycenterOut = (float)(iOutHeight) / 2.0;
float xcenterIn = (float)iW / 2.0f;
float ycenterIn = (float)iH / 2.0f;
for (int row = 0; row < iOutHeight; row++)
{
for (int col = 0; col < iOutWidth; col++)
{
float u = (float)col - xcenterOut;
float v = (float)row - ycenterOut;
float tu = u * cs - v * ss;
float tv = v * cs + u * ss;
tu *= fMag;
tv *= fMag;
tu += xcenterIn;
tv += ycenterIn;
//tu += (iOutWidth - iW) / 2;
//tu += (iOutHeight - iH) / 2;
if (tu >= 0 && tu < iW && tv >= 0 && tv < iH)
{
//pOutData[row*iOutWidth + col] = getInterpolatedPixel_TF(tu, tv, iW, iH, pData);
long offset = (int)tv*iW + (int)tu;
// pLut[offset] = row*iOutWidth + col;
pLut[row*iOutWidth + col] = offset;
}
}
}
}
//--------------------------------------------------------------------------------
//cudaError_t RotateAddImage_Cuda(unsigned short* pInData, int inWidth, int inHeight, int iNumFrames, unsigned short *pOutData, int outWidth, int outHeight, double theta, double fScale, double fMag)
//{
//
// cudaArray *cuArray_img;
// float *d_OutData = 0;
// unsigned short *d_InData_us = 0;
// float *d_InData = 0;
// float *d_RotatedFrameData = 0;
// unsigned char *d_pMaskData = 0;
// float *pTempInData = 0;
// float *pTempOutData = 0;
// int *pLut = 0;
// int *d_pLut = 0;
// unsigned char *pMaskData = NULL;
//
// int iFrameSize = inWidth * inHeight;
// int iOutFrameSize = outWidth * outHeight;
//
// pTempInData = new float[iFrameSize];
// pTempOutData = new float[iOutFrameSize];
//
//
// cudaError_t cudaStatus = cudaErrorInvalidValue;
//
// int iRotWidth, int iRotHeight;
// FindDimensionAfterRotation(inWidth, inHeight, theta, fMag, iRotWidth, iRotHeight);
//
// int iRotatedFrameSize = iRotWidth * iRotHeight;
//
// pLut = new int[iRotatedFrameSize];
// RotateImage_GetLUT_cpu(inWidth, inHeight, pLut, iRotWidth, iRotHeight, theta, fMag);
//
// //Get mask data to cover arround the edges after rotation
// int iNewMaskW = 0, iNewMaskH = 0;
// pMaskData = GetMaskDataAfterRotation(inWidth, inHeight, theta, fMag, iNewMaskW, iNewMaskH);
// if (iNewMaskW != iRotWidth || iNewMaskH != iRotHeight)
// {
// printf("dimension mismatch when creating mask\n");
// goto Error;
// }
// //WriteRawData<unsigned char>("c:\\Temp\\MaskData.raw", pMaskData, iNewMaskW, iNewMaskH);
//
// printf("Rotaed dim %d %d %d %d\n", iRotWidth, iRotHeight, outWidth, outHeight);
// // Choose which GPU to run on, change this on a multi-GPU system.
// //cudaStatus = cudaSetDevice(0);
// //if (cudaStatus != cudaSuccess) {
// // fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// // goto Error;
// //}
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// //imput image text array
//// cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
//// cudaMallocArray(&cuArray_img, &channelDesc, inWidth, inHeight);
////// cudaMemcpyToArray(cuArray_img, 0, 0, pInData, iFrameSize * sizeof(unsigned short), cudaMemcpyHostToDevice);
//// // Set texture parameters
//// tex_rot_imgA.addressMode[0] = cudaAddressModeBorder;// ModeWrap;
//// tex_rot_imgA.addressMode[1] = cudaAddressModeBorder;
//// tex_rot_imgA.filterMode = cudaFilterModeLinear;
//// tex_rot_imgA.normalized = true; // access with normalized texture coordinates
//// cudaBindTextureToArray(tex_rot_imgA, cuArray_img, channelDesc);
//
//
//
// cudaStatus = cudaMalloc((void**)&d_OutData, iOutFrameSize * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&d_InData_us, iFrameSize * sizeof(unsigned short));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&d_InData, iFrameSize * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&d_RotatedFrameData, iRotatedFrameSize * sizeof(float)*2);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&d_pLut, iRotatedFrameSize * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
// cudaMemcpy(d_pLut, pLut, iRotatedFrameSize * sizeof(int), cudaMemcpyHostToDevice);
//
// cudaStatus = cudaMalloc((void**)&d_pMaskData, iRotatedFrameSize * sizeof(unsigned char) );
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//// cudaMemcpy(d_pMaskData, pMaskData, iRotatedFrameSize * sizeof(unsigned char), cudaMemcpyHostToDevice);
//
// int TILE_SIZE_X = 16;
// int TILE_SIZE_Y = 16;
// dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y);
//
// dim3 dimGrid((int)ceil((float)iRotWidth / (float)TILE_SIZE_X), (int)ceil((float)iRotHeight / (float)TILE_SIZE_Y));
//
// dim3 dimGrid_in((int)ceil((float)inWidth / (float)TILE_SIZE_X), (int)ceil((float)inHeight / (float)TILE_SIZE_Y));
//
// dim3 dimGrid_out((int)ceil((float)outWidth / (float)TILE_SIZE_X), (int)ceil((float)outHeight / (float)TILE_SIZE_Y));
//
// double fShiftRow = 0.0;
// bool bReversed = false;
// int iCurIndex = 0;
// int iPrevIndex = -1;
// SetValues_kernel << <dimGrid_out, dimBlock >> > (d_OutData, 0, outWidth, outHeight);
//
//
// for (int iZ = 0; iZ < iNumFrames /*&&iZ<30*/ ; iZ++)
// {
// iCurIndex = (int)fShiftRow*outWidth;
// if (iPrevIndex != iCurIndex)
// {
//
//
// int iZIndex = bReversed ? iZ : iNumFrames - 1 - iZ;
//
// //////////////////
// //unsigned short *pInDataRef = &pInData[iZIndex * iFrameSize];
// //for (int k = 0; k < iFrameSize; k++)
// // pTempInData[k] = (float)pInDataRef[k];
// //cudaMemcpy(d_InData, pTempInData, iFrameSize * sizeof(float), cudaMemcpyHostToDevice);
// /////////////////////
//
// cudaMemcpy(d_InData_us, &pInData[iZIndex * iFrameSize], iFrameSize * sizeof(unsigned short), cudaMemcpyHostToDevice);
// Memcpy_us_to_float__kernel << <dimGrid_in, dimBlock>> > (d_InData_us, d_InData, inWidth, inHeight);
//
//
// SetValues_kernel << <dimGrid, dimBlock >> > (d_RotatedFrameData, 0, iRotWidth, iRotHeight);
// RotateImageA_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, 1.0 / fMag);
//
//
//
// // RotateImageA_usingLUT_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, d_pLut);
// //SmoothBorder_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_RotatedFrameData[iRotatedFrameSize], d_pMaskData, iRotWidth, iRotHeight, 3);
// TDS_AddA_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
//
// cudaDeviceSynchronize();
// }
// fShiftRow += fScale;
// if (fShiftRow >= outHeight) break;
// iPrevIndex = iCurIndex;
// }
//
//
// // Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// //cudaStatus = cudaMemcpy(pOutData, d_OutData, iOutFrameSize * sizeof(unsigned short), cudaMemcpyDeviceToHost);
// cudaStatus = cudaMemcpy(pTempOutData, d_OutData, iOutFrameSize * sizeof(float), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
// for (int k = 0; k < iOutFrameSize; k++)
// pOutData[k] = (unsigned short)pTempOutData[k];
//
//Error:
// //cudaFreeArray(cuArray_img);
// if ( d_InData!=NULL )cudaFree(d_InData);
// if (d_OutData!=NULL) cudaFree(d_OutData);
// if (d_RotatedFrameData!=NULL) cudaFree(d_RotatedFrameData);
// if (d_pMaskData!=NULL)cudaFree(d_pMaskData);
// if (d_InData_us != NULL)cudaFree(d_InData_us);
// if (d_pLut != NULL) cudaFree(d_pLut);
// delete[] pMaskData;
// delete[] pTempInData;
// delete[] pTempOutData;
// delete[] pLut;
// return cudaStatus;
//}
//--------------------------------------------------------------------------------------------------------------------------
cudaError_t RotateAddImage_Cuda(unsigned char* pInData, int inWidth, int inHeight, int iNumFrames, unsigned char *pOutData, int outWidth, int outHeight, int pixType, double theta, double fScale, double fMag, bool bReversed)
{
cudaArray *cuArray_img;
float *d_OutData = 0;
unsigned char *d_InData_any = 0;
float *d_InData = 0;
float *d_RotatedFrameData = 0;
unsigned char *d_pMaskData = 0;
float *pTempInData = 0;
float *pTempOutData = 0;
int *pLut = 0;
int *d_pLut = 0;
unsigned char *pMaskData = NULL;
int iFrameSize = inWidth * inHeight;
int iOutFrameSize = outWidth * outHeight;
pTempInData = new float[iFrameSize];
pTempOutData = new float[iOutFrameSize];
int pixSize = 2; //u16
switch (pixType)
{
case 1: pixSize = 1; break; //u8
case 4: case 6: pixSize = 4; break; //u32 and float
};
cudaError_t cudaStatus = cudaErrorInvalidValue;
int iRotWidth, iRotHeight;
FindDimensionAfterRotation(inWidth, inHeight, theta, fMag, iRotWidth, iRotHeight);
int iRotatedFrameSize = iRotWidth * iRotHeight;
pLut = new int[iRotatedFrameSize];
RotateImage_GetLUT_cpu(inWidth, inHeight, pLut, iRotWidth, iRotHeight, theta, fMag);
//Get mask data to cover arround the edges after rotation
int iNewMaskW = 0, iNewMaskH = 0;
pMaskData = GetMaskDataAfterRotation(inWidth, inHeight, theta, fMag, iNewMaskW, iNewMaskH);
if (iNewMaskW != iRotWidth || iNewMaskH != iRotHeight)
{
printf("dimension mismatch when creating mask\n");
goto Error;
}
//WriteRawData<unsigned char>("c:\\Temp\\MaskData.raw", pMaskData, iNewMaskW, iNewMaskH);
printf("Rotaed dim %d %d %d %d\n", iRotWidth, iRotHeight, outWidth, outHeight);
cudaStatus = cudaMalloc((void**)&d_OutData, iOutFrameSize * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_InData_any, iFrameSize * pixSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_InData, iFrameSize * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_RotatedFrameData, iRotatedFrameSize * sizeof(float) * 2);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_pLut, iRotatedFrameSize * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaMemcpy(d_pLut, pLut, iRotatedFrameSize * sizeof(int), cudaMemcpyHostToDevice);
cudaStatus = cudaMalloc((void**)&d_pMaskData, iRotatedFrameSize * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// cudaMemcpy(d_pMaskData, pMaskData, iRotatedFrameSize * sizeof(unsigned char), cudaMemcpyHostToDevice);
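	// Launch configuration: 16x16 thread tiles; dimGrid covers the rotated frame, dimGrid_in the
	// input frame (used by the type-conversion kernel), and dimGrid_out the whole output image.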
int TILE_SIZE_X = 16;
int TILE_SIZE_Y = 16;
dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y);
dim3 dimGrid((int)ceil((float)iRotWidth / (float)TILE_SIZE_X), (int)ceil((float)iRotHeight / (float)TILE_SIZE_Y));
dim3 dimGrid_in((int)ceil((float)inWidth / (float)TILE_SIZE_X), (int)ceil((float)inHeight / (float)TILE_SIZE_Y));
dim3 dimGrid_out((int)ceil((float)outWidth / (float)TILE_SIZE_X), (int)ceil((float)outHeight / (float)TILE_SIZE_Y));
float fShiftRow = 0.0;
//bool bReversed = false;
int iCurIndex = 0;
int iPrevIndex = -1;
SetValues_kernel << <dimGrid_out, dimBlock >> > (d_OutData, 0, outWidth, outHeight);
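	// Shift-and-add accumulation: each new frame is copied to the device, widened to float, rotated,
	// and added into d_OutData at a row offset of (int)fShiftRow, which advances by fScale per frame.
	// Frames are taken in reverse order unless bReversed is set.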
for (int iZ = 0; iZ < iNumFrames /*&&iZ<30*/; iZ++)
{
iCurIndex = (int)fShiftRow*outWidth;
if (iPrevIndex != iCurIndex)
{
int iZIndex = bReversed ? iZ : iNumFrames - 1 - iZ;
//////////////////
//unsigned short *pInDataRef = &pInData[iZIndex * iFrameSize];
//for (int k = 0; k < iFrameSize; k++)
// pTempInData[k] = (float)pInDataRef[k];
//cudaMemcpy(d_InData, pTempInData, iFrameSize * sizeof(float), cudaMemcpyHostToDevice);
/////////////////////
cudaMemcpy(d_InData_any, &pInData[iZIndex * iFrameSize * pixSize], iFrameSize * pixSize, cudaMemcpyHostToDevice);
Memcpy_any_to_float__kernel << <dimGrid_in, dimBlock >> > (d_InData_any, d_InData, inWidth, inHeight, pixType);
//Memcpy_us_to_float__kernel << <dimGrid_in, dimBlock >> > ((unsigned short*)d_InData_any, d_InData, inWidth, inHeight);
SetValues_kernel << <dimGrid, dimBlock >> > (d_RotatedFrameData, 0, iRotWidth, iRotHeight);
RotateImageA_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, 1.0 / fMag);
//RotateImageA_usingLUT_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, d_pLut);
TDS_AddA_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
cudaDeviceSynchronize();
}
fShiftRow += fScale;
if (fShiftRow >= outHeight) break;
iPrevIndex = iCurIndex;
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
//cudaStatus = cudaMemcpy(pOutData, d_OutData, iOutFrameSize * sizeof(unsigned short), cudaMemcpyDeviceToHost);
cudaStatus = cudaMemcpy(pTempOutData, d_OutData, iOutFrameSize * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	// Narrow the float accumulation buffer back to the caller's pixel type, stepping pixSize bytes per output pixel.
	switch (pixType)
	{
	case 1: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned char*)&pOutData[k]) = (unsigned char)pTempOutData[p]; break;
	case 2: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned short*)&pOutData[k]) = (unsigned short)pTempOutData[p]; break;
	case 4: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned int*)&pOutData[k]) = (unsigned int)pTempOutData[p]; break;
	case 6: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((float*)&pOutData[k]) = pTempOutData[p]; break;
	};
Error:
//cudaFreeArray(cuArray_img);
if (d_InData != NULL)cudaFree(d_InData);
if (d_OutData != NULL) cudaFree(d_OutData);
if (d_RotatedFrameData != NULL) cudaFree(d_RotatedFrameData);
if (d_pMaskData != NULL)cudaFree(d_pMaskData);
if (d_InData_any != NULL)cudaFree(d_InData_any);
if (d_pLut != NULL) cudaFree(d_pLut);
delete[] pMaskData;
delete[] pTempInData;
delete[] pTempOutData;
delete[] pLut;
return cudaStatus;
}
//--------------------------------------------------------------------------------
cudaError_t RotateAddImage_lut_Cuda(unsigned char* pInData, int inWidth, int inHeight, int iNumFrames, unsigned char *pOutData, int outWidth, int outHeight, int pixType, double theta, double fScale, double fMag, bool bReversed)
{
float *d_OutData = 0;
unsigned char *d_InData_any = 0;
float *d_InData = 0;
float *d_RotatedFrameData = 0;
float *pTempInData = 0;
float *pTempOutData = 0;
int *pLut = 0;
int *d_pLut = 0;
unsigned char *pMaskData = NULL;
int iFrameSize = inWidth * inHeight;
int iOutFrameSize = outWidth * outHeight;
pTempInData = new float[iFrameSize];
pTempOutData = new float[iOutFrameSize];
int pixSize = 2; //u16
	switch (pixType)
	{
	case 1: pixSize = 1; break; //u8
	case 4: case 6: pixSize = 4; break; //u32 and float
	};
cudaError_t cudaStatus = cudaErrorInvalidValue;
	int iRotWidth, iRotHeight;
FindDimensionAfterRotation(inWidth, inHeight, theta, fMag, iRotWidth, iRotHeight);
int iRotatedFrameSize = iRotWidth * iRotHeight;
pLut = new int[iRotatedFrameSize];
RotateImage_GetLUT_cpu(inWidth, inHeight, pLut, iRotWidth, iRotHeight, theta, fMag);
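	// Build the rotation lookup table on the CPU: each entry maps a pixel of the rotated frame back
	// to a source index in the input frame, so RotateImageA_usingLUT_kernel can rotate by a gather.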
printf("Rotaed dim %d %d %d %d\n", iRotWidth, iRotHeight, outWidth, outHeight);
cudaStatus = cudaMalloc((void**)&d_OutData, iOutFrameSize * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_InData_any, iFrameSize * pixSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_InData, iFrameSize * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_RotatedFrameData, iRotatedFrameSize * sizeof(float) * 2);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_pLut, iRotatedFrameSize * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaMemcpy(d_pLut, pLut, iRotatedFrameSize * sizeof(int), cudaMemcpyHostToDevice);
int TILE_SIZE_X = 32;
int TILE_SIZE_Y = 8;
dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y);
dim3 dimGrid((int)ceil((float)iRotWidth / (float)TILE_SIZE_X), (int)ceil((float)iRotHeight / (float)TILE_SIZE_Y));
dim3 dimGrid_in((int)ceil((float)inWidth / (float)TILE_SIZE_X), (int)ceil((float)inHeight / (float)TILE_SIZE_Y));
float fShiftRow = 0.0f;
//bool bReversed = false;
//bReversed = false;
int iCurIndex = 0;
int iPrevIndex = -1;
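	// Same shift-and-add loop as RotateAddImage_Cuda, except rotation goes through the precomputed
	// LUT (RotateImageA_usingLUT_kernel) and accumulation uses TDS_AddA_conditional_kernel, which
	// presumably skips pixels outside the rotated frame's valid footprint.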
for (int iZ = 0; iZ < iNumFrames; iZ++)
{
iCurIndex = (int)fShiftRow*outWidth;
if (iPrevIndex != iCurIndex)
{
int iZIndex = bReversed ? iZ : iNumFrames - 1 - iZ;
//////////////////
//unsigned short *pInDataRef = &pInData[iZIndex * iFrameSize];
//for (int k = 0; k < iFrameSize; k++)
// pTempInData[k] = (float)pInDataRef[k];
//cudaMemcpy(d_InData, pTempInData, iFrameSize * sizeof(float), cudaMemcpyHostToDevice);
/////////////////////
cudaMemcpy(d_InData_any, &pInData[iZIndex * iFrameSize * pixSize], iFrameSize * pixSize, cudaMemcpyHostToDevice);
Memcpy_any_to_float__kernel << <dimGrid_in, dimBlock >> > (d_InData_any, d_InData, inWidth, inHeight, pixType);
//Memcpy_us_to_float__kernel << <dimGrid_in, dimBlock >> > ((unsigned short*)d_InData_any, d_InData, inWidth, inHeight);
SetValues_kernel << <dimGrid, dimBlock >> > (d_RotatedFrameData, 0, iRotWidth, iRotHeight);
//RotateImageA_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, 1.0 / fMag);
RotateImageA_usingLUT_kernel << <dimGrid, dimBlock >> > (d_InData, &d_RotatedFrameData[0], inWidth, inHeight, iRotWidth, iRotHeight, theta, d_pLut);
// TDS_AddA_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
TDS_AddA_conditional_kernel << <dimGrid, dimBlock >> > (&d_RotatedFrameData[0], &d_OutData[iCurIndex], iRotWidth, iRotHeight);
cudaDeviceSynchronize();
}
fShiftRow += fScale;
iPrevIndex = iCurIndex;
if (fShiftRow >= outHeight) break;
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
//cudaStatus = cudaMemcpy(pOutData, d_OutData, iOutFrameSize * sizeof(unsigned short), cudaMemcpyDeviceToHost);
cudaStatus = cudaMemcpy(pTempOutData, d_OutData, iOutFrameSize * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
/////////////////////
//FILE *out; fopen_s(&out, "c:\\temp\\testOut.raw", "wb"); fwrite(pTempOutData, 4, iOutFrameSize, out); fclose(out);
////////////////////
	switch (pixType)
	{
	case 1: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned char*)&pOutData[k]) = (unsigned char)pTempOutData[p]; break;
	case 2: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned short*)&pOutData[k]) = (unsigned short)pTempOutData[p]; break;
	case 4: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((unsigned int*)&pOutData[k]) = (unsigned int)pTempOutData[p]; break;
	case 6: for (int k = 0, p = 0; p < iOutFrameSize; k += pixSize, p++) *((float*)&pOutData[k]) = pTempOutData[p]; break;
	};
Error:
//cudaFreeArray(cuArray_img);
if (d_InData != NULL)cudaFree(d_InData);
if (d_OutData != NULL) cudaFree(d_OutData);
if (d_RotatedFrameData != NULL) cudaFree(d_RotatedFrameData);
if (d_InData_any != NULL)cudaFree(d_InData_any);
if (d_pLut != NULL) cudaFree(d_pLut);
delete[] pMaskData;
delete[] pTempInData;
delete[] pTempOutData;
delete[] pLut;
return cudaStatus;
}
|
709871ce93f2d391301f45386b1d699c07fe1e3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd.cu, normal z -> s, Tue Aug 30 09:38:27 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset.
*/
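// Illustrative example: for m = 1000, n = 100 the launch in magmablas_sgeadd_q below uses a
// 16 x 4 grid of 64-thread blocks; each thread owns one row of its block-column and updates at
// most BLK_Y = 32 entries of it.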
__global__
void sgeadd_full(
int m, int n,
float alpha,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dA REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
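// Typical call (sketch): magmablas_sgeadd_q( m, n, 2.0f, dA, ldda, dB, lddb, queue );
// computes dB := 2*dA + dB on the device stream wrapped by 'queue'.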
extern "C" void
magmablas_sgeadd_q(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( sgeadd_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dA, ldda, dB, lddb );
}
| 709871ce93f2d391301f45386b1d699c07fe1e3b.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd.cu, normal z -> s, Tue Aug 30 09:38:27 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset.
*/
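// Illustrative example: for m = 1000, n = 100 the launch in magmablas_sgeadd_q below uses a
// 16 x 4 grid of 64-thread blocks; each thread owns one row of its block-column and updates at
// most BLK_Y = 32 entries of it.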
__global__
void sgeadd_full(
int m, int n,
float alpha,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dA REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
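// Typical call (sketch): magmablas_sgeadd_q( m, n, 2.0f, dA, ldda, dB, lddb, queue );
// computes dB := 2*dA + dB on the device stream wrapped by 'queue'.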
extern "C" void
magmablas_sgeadd_q(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
sgeadd_full<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, dA, ldda, dB, lddb );
}
|
bac092e0b179bfe3c56d593795bcdcb3ee21c98f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "connected_component_gpu.h"
#include "texture.h"
#include "block_uf.h"
namespace apollo {
namespace perception {
using std::shared_ptr;
using std::unordered_set;
using std::vector;
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height)
: image_width_(image_width),
image_height_(image_height),
width_(image_width),
height_(image_height),
roi_x_min_(0),
roi_y_min_(0),
roi_x_max_(image_width - 1),
roi_y_max_(image_height - 1) {
total_pix_ =
static_cast<size_t>(image_width_) * static_cast<size_t>(image_height_);
hipChannelFormatDesc uchar_desc = hipCreateChannelDesc<unsigned char>();
hipMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
hipBindTextureToArray(img_tex, img_array_, uchar_desc);
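  // The lane image is staged in a CUDA array and bound to the 'img_tex' texture (declared in one of
  // the included headers, presumably texture.h) so the union-find kernels can sample it; per-pixel
  // labels live in the plain device buffer label_array_.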
hipMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
hipError_t cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr
<< "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height,
cv::Rect roi)
: image_width_(image_width),
image_height_(image_height),
width_(roi.width),
height_(roi.height),
roi_x_min_(roi.x),
roi_y_min_(roi.y),
roi_x_max_(roi.x + roi.width - 1),
roi_y_max_(roi.y + roi.height - 1) {
if (roi_x_min_ < 0) {
std::cerr << "x_min is less than 0: " << roi_x_min_ << std::endl;
}
if (roi_y_min_ < 0) {
std::cerr << "y_min is less than 0: " << roi_y_min_ << std::endl;
}
if (roi_x_max_ >= image_width_) {
std::cerr << "x_max is larger than image width: "
<< roi_x_max_ << "|"
<< image_width_ << std::endl;
}
if (roi_y_max_ >= image_height_) {
std::cerr << "y_max is larger than image height: "
<< roi_y_max_ << "|"
<< image_height_ << std::endl;
}
total_pix_ = static_cast<size_t>(width_) * static_cast<size_t>(height_);
hipChannelFormatDesc uchar_desc = hipCreateChannelDesc<unsigned char>();
hipMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
hipBindTextureToArray(img_tex, img_array_, uchar_desc);
hipMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
hipError_t cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr << "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
bool ConnectedComponentGeneratorGPU::BlockUnionFind(const unsigned char* img) {
hipError_t cuda_err;
if (width_ == image_width_) {
size_t siz = static_cast<size_t>(width_) * static_cast<size_t>(height_) *
sizeof(unsigned char);
hipMemcpyToArray(img_array_, 0, 0, img, siz, hipMemcpyHostToDevice);
} else {
size_t siz = static_cast<size_t>(width_) * sizeof(unsigned char);
for (size_t i = 0; i < static_cast<size_t>(height_); ++i) {
hipMemcpyToArray(img_array_, 0, i, img, siz, hipMemcpyHostToDevice);
img += image_width_;
}
}
dim3 block(UF_BLOCK_WIDTH, UF_BLOCK_HEIGHT);
dim3 grid(
static_cast<unsigned int>((width_ + UF_BLOCK_WIDTH - 1) / UF_BLOCK_WIDTH),
static_cast<unsigned int>((height_ + UF_BLOCK_HEIGHT - 1) /
UF_BLOCK_HEIGHT));
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr << "failed to start block union find with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
return false;
}
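  // Labeling runs in three kernel phases: union-find within each UF_BLOCK_WIDTH x UF_BLOCK_HEIGHT
  // tile, a boundary pass that merges labels across tile borders, and a final pass that flattens
  // every pixel's label to its root.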
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipLaunchKernelGGL(( block_uf::BlockUnionFindInternal), dim3(grid), dim3(block), 0, 0, label_array_, width_,
height_);
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
hipLaunchKernelGGL(( block_uf::BlockUnionFindBoundary), dim3(grid), dim3(block), 0, 0, label_array_, width_,
height_);
hipLaunchKernelGGL(( block_uf::BlockUnionFindRoot), dim3(grid), dim3(block), 0, 0, label_array_, width_, height_);
hipMemcpy(
labels_, label_array_,
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int),
hipMemcpyDeviceToHost);
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr << "failed to finish block union find with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
return false;
}
return true;
}
bool ConnectedComponentGeneratorGPU::FindConnectedComponents(
const cv::Mat& lane_map, vector<shared_ptr<ConnectedComponent>>* cc) {
if (lane_map.empty()) {
std::cerr << "The input lane map is empty." << std::endl;
return false;
}
if (lane_map.type() != CV_8UC1) {
std::cerr << "The input lane map type is not CV_8UC1." << std::endl;
return false;
}
if (lane_map.cols != image_width_) {
std::cerr << "The width of input lane map does not match." << std::endl;
return false;
}
if (lane_map.rows != image_height_) {
std::cerr << "The height of input lane map does not match." << std::endl;
return false;
}
if (cc == NULL) {
std::cerr << "The pointer of output connected components is null."
<< std::endl;
return false;
}
cc->clear();
const unsigned char* img =
lane_map.data + roi_y_min_ * image_width_ + roi_x_min_;
BlockUnionFind(img);
int cur_idx = 0;
int curt_label = 0;
int cc_count = 0;
root_map_.assign(total_pix_, -1);
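  // Compact the raw root labels into consecutive component ids: the first pixel seen for a given
  // root creates a new ConnectedComponent; subsequent pixels with the same root are appended to it.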
for (int y = roi_y_min_; y <= roi_y_max_; ++y) {
for (int x = roi_x_min_; x <= roi_x_max_; ++x) {
curt_label = labels_[cur_idx];
if (curt_label >= 0) {
if (curt_label >= static_cast<int>(total_pix_)) {
std::cerr << "curt_label should be smaller than root_map.size() "
<< curt_label << " (" << total_pix_ << ")." << std::endl;
return false;
}
if (root_map_[curt_label] != -1) {
cc->at(root_map_[curt_label])->AddPixel(x, y);
} else {
cc->push_back(std::make_shared<ConnectedComponent>(x, y));
root_map_[curt_label] = cc_count++;
}
}
++cur_idx;
} // end for x
} // end for y
std::cout << "#cc = " << cc_count << std::endl;
return true;
}
} // namespace perception
} // namespace apollo
| bac092e0b179bfe3c56d593795bcdcb3ee21c98f.cu | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "connected_component_gpu.h"
#include "texture.h"
#include "block_uf.h"
namespace apollo {
namespace perception {
using std::shared_ptr;
using std::unordered_set;
using std::vector;
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height)
: image_width_(image_width),
image_height_(image_height),
width_(image_width),
height_(image_height),
roi_x_min_(0),
roi_y_min_(0),
roi_x_max_(image_width - 1),
roi_y_max_(image_height - 1) {
total_pix_ =
static_cast<size_t>(image_width_) * static_cast<size_t>(image_height_);
cudaChannelFormatDesc uchar_desc = cudaCreateChannelDesc<unsigned char>();
cudaMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
cudaBindTextureToArray(img_tex, img_array_, uchar_desc);
cudaMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
cudaError_t cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr
<< "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height,
cv::Rect roi)
: image_width_(image_width),
image_height_(image_height),
width_(roi.width),
height_(roi.height),
roi_x_min_(roi.x),
roi_y_min_(roi.y),
roi_x_max_(roi.x + roi.width - 1),
roi_y_max_(roi.y + roi.height - 1) {
if (roi_x_min_ < 0) {
std::cerr << "x_min is less than 0: " << roi_x_min_ << std::endl;
}
if (roi_y_min_ < 0) {
std::cerr << "y_min is less than 0: " << roi_y_min_ << std::endl;
}
if (roi_x_max_ >= image_width_) {
std::cerr << "x_max is larger than image width: "
<< roi_x_max_ << "|"
<< image_width_ << std::endl;
}
if (roi_y_max_ >= image_height_) {
std::cerr << "y_max is larger than image height: "
<< roi_y_max_ << "|"
<< image_height_ << std::endl;
}
total_pix_ = static_cast<size_t>(width_) * static_cast<size_t>(height_);
cudaChannelFormatDesc uchar_desc = cudaCreateChannelDesc<unsigned char>();
cudaMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
cudaBindTextureToArray(img_tex, img_array_, uchar_desc);
cudaMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
cudaError_t cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr << "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
bool ConnectedComponentGeneratorGPU::BlockUnionFind(const unsigned char* img) {
cudaError_t cuda_err;
if (width_ == image_width_) {
size_t siz = static_cast<size_t>(width_) * static_cast<size_t>(height_) *
sizeof(unsigned char);
cudaMemcpyToArray(img_array_, 0, 0, img, siz, cudaMemcpyHostToDevice);
} else {
size_t siz = static_cast<size_t>(width_) * sizeof(unsigned char);
for (size_t i = 0; i < static_cast<size_t>(height_); ++i) {
cudaMemcpyToArray(img_array_, 0, i, img, siz, cudaMemcpyHostToDevice);
img += image_width_;
}
}
dim3 block(UF_BLOCK_WIDTH, UF_BLOCK_HEIGHT);
dim3 grid(
static_cast<unsigned int>((width_ + UF_BLOCK_WIDTH - 1) / UF_BLOCK_WIDTH),
static_cast<unsigned int>((height_ + UF_BLOCK_HEIGHT - 1) /
UF_BLOCK_HEIGHT));
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr << "failed to start block union find with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
return false;
}
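  // Labeling runs in three kernel phases: union-find within each UF_BLOCK_WIDTH x UF_BLOCK_HEIGHT
  // tile, a boundary pass that merges labels across tile borders, and a final pass that flattens
  // every pixel's label to its root.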
cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
block_uf::BlockUnionFindInternal<<<grid, block>>>(label_array_, width_,
height_);
cudaThreadSetCacheConfig(cudaFuncCachePreferL1);
block_uf::BlockUnionFindBoundary<<<grid, block>>>(label_array_, width_,
height_);
block_uf::BlockUnionFindRoot<<<grid, block>>>(label_array_, width_, height_);
cudaMemcpy(
labels_, label_array_,
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int),
cudaMemcpyDeviceToHost);
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr << "failed to finish block union find with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
return false;
}
return true;
}
bool ConnectedComponentGeneratorGPU::FindConnectedComponents(
const cv::Mat& lane_map, vector<shared_ptr<ConnectedComponent>>* cc) {
if (lane_map.empty()) {
std::cerr << "The input lane map is empty." << std::endl;
return false;
}
if (lane_map.type() != CV_8UC1) {
std::cerr << "The input lane map type is not CV_8UC1." << std::endl;
return false;
}
if (lane_map.cols != image_width_) {
std::cerr << "The width of input lane map does not match." << std::endl;
return false;
}
if (lane_map.rows != image_height_) {
std::cerr << "The height of input lane map does not match." << std::endl;
return false;
}
if (cc == NULL) {
std::cerr << "The pointer of output connected components is null."
<< std::endl;
return false;
}
cc->clear();
const unsigned char* img =
lane_map.data + roi_y_min_ * image_width_ + roi_x_min_;
BlockUnionFind(img);
int cur_idx = 0;
int curt_label = 0;
int cc_count = 0;
root_map_.assign(total_pix_, -1);
for (int y = roi_y_min_; y <= roi_y_max_; ++y) {
for (int x = roi_x_min_; x <= roi_x_max_; ++x) {
curt_label = labels_[cur_idx];
if (curt_label >= 0) {
if (curt_label >= static_cast<int>(total_pix_)) {
std::cerr << "curt_label should be smaller than root_map.size() "
<< curt_label << " (" << total_pix_ << ")." << std::endl;
return false;
}
if (root_map_[curt_label] != -1) {
cc->at(root_map_[curt_label])->AddPixel(x, y);
} else {
cc->push_back(std::make_shared<ConnectedComponent>(x, y));
root_map_[curt_label] = cc_count++;
}
}
++cur_idx;
} // end for x
} // end for y
std::cout << "#cc = " << cc_count << std::endl;
return true;
}
} // namespace perception
} // namespace apollo
|
25dc038b2ccae3db3ee7b79cddec26557255b532.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_split_impl.cuh"
#include <complex>
#include <algorithm>
#include <iostream>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
#include "plugin/device/cpu/kernel/nnacl/op_base.h"
template <typename DataType, typename IndexType>
__global__ void SparseSplitKernel(IndexType *split_dim_ptr, IndexType *indices_ptr, DataType *values_ptr,
IndexType *shape_ptr, IndexType num_split, IndexType **y_indices_ptr,
DataType **y_values_ptr, IndexType *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, IndexType *d_block_ptr) {
  // Each nonzero: locate the destination split by comparing its index along the split dimension
  // against the d_block_ptr boundaries, atomically reserve an output slot (atomicAdd on
  // sum_count_ptr), then write the value and the index rebased to that split's origin.
for (size_t input_nz = blockIdx.x * blockDim.x + threadIdx.x; input_nz < input_nnz_;
input_nz += blockDim.x * gridDim.x) {
DataType value = values_ptr[input_nz];
IndexType *index = indices_ptr + input_nz * 2;
IndexType idx_i = index[*split_dim_ptr];
IndexType block;
for (IndexType i = 0; i < num_split; i++) {
if (idx_i < d_block_ptr[i + 1] && idx_i >= d_block_ptr[i]) {
block = i;
}
}
int idx = atomicAdd(&sum_count_ptr[block], 1);
if (*split_dim_ptr == 0) {
y_indices_ptr[block][idx * 2] = index[0] - d_block_ptr[block];
y_indices_ptr[block][idx * 2 + 1] = index[1];
} else {
y_indices_ptr[block][idx * 2] = index[0];
y_indices_ptr[block][idx * 2 + 1] = index[1] - d_block_ptr[block];
}
y_values_ptr[block][idx] = value;
}
}
template <typename DataType, typename IndexType>
CUDA_LIB_EXPORT void SparseSplit(IndexType *split_dim_ptr, IndexType *indices_ptr, DataType *values_ptr,
IndexType *shape_ptr, IndexType num_split, IndexType **y_indices_ptr,
DataType **y_values_ptr, IndexType *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, IndexType *d_block_ptr, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( SparseSplitKernel), dim3(GET_BLOCKS(input_nnz_)), dim3(GET_THREADS), 0, cuda_stream,
split_dim_ptr, indices_ptr, values_ptr, shape_ptr, num_split, y_indices_ptr, y_values_ptr, out_shape_ptr,
sum_count_ptr, input_nnz_, num_dim_, d_block_ptr);
}
template CUDA_LIB_EXPORT void SparseSplit<uint8_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
uint8_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, uint8_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<uint16_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
uint16_t *values_ptr, int64_t *shape_ptr,
int64_t num_split, int64_t **y_indices_ptr,
uint16_t **y_values_ptr, int64_t *out_shape_ptr,
int *sum_count_ptr, size_t input_nnz_, size_t num_dim_,
int64_t *d_block_ptr, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int64_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int64_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int64_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int32_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int32_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int32_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int16_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int16_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int16_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int8_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int8_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int8_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<double, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
double *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, double **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<float, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
float *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, float **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr, size_t input_nnz_,
size_t num_dim_, int64_t *d_block_ptr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<half, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr, half *values_ptr,
int64_t *shape_ptr, int64_t num_split, int64_t **y_indices_ptr,
half **y_values_ptr, int64_t *out_shape_ptr,
int *sum_count_ptr, size_t input_nnz_, size_t num_dim_,
int64_t *d_block_ptr, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<bool, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr, bool *values_ptr,
int64_t *shape_ptr, int64_t num_split, int64_t **y_indices_ptr,
bool **y_values_ptr, int64_t *out_shape_ptr,
int *sum_count_ptr, size_t input_nnz_, size_t num_dim_,
int64_t *d_block_ptr, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<Complex<float>, int64_t>(
int64_t *split_dim_ptr, int64_t *indices_ptr, Complex<float> *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, Complex<float> **y_values_ptr, int64_t *out_shape_ptr, int *sum_count_ptr, size_t input_nnz_,
size_t num_dim_, int64_t *d_block_ptr, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<Complex<double>, int64_t>(
int64_t *split_dim_ptr, int64_t *indices_ptr, Complex<double> *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, Complex<double> **y_values_ptr, int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr, hipStream_t cuda_stream);
| 25dc038b2ccae3db3ee7b79cddec26557255b532.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_split_impl.cuh"
#include <complex>
#include <algorithm>
#include <iostream>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
#include "plugin/device/cpu/kernel/nnacl/op_base.h"
template <typename DataType, typename IndexType>
__global__ void SparseSplitKernel(IndexType *split_dim_ptr, IndexType *indices_ptr, DataType *values_ptr,
IndexType *shape_ptr, IndexType num_split, IndexType **y_indices_ptr,
DataType **y_values_ptr, IndexType *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, IndexType *d_block_ptr) {
  // Each nonzero: locate the destination split by comparing its index along the split dimension
  // against the d_block_ptr boundaries, atomically reserve an output slot (atomicAdd on
  // sum_count_ptr), then write the value and the index rebased to that split's origin.
for (size_t input_nz = blockIdx.x * blockDim.x + threadIdx.x; input_nz < input_nnz_;
input_nz += blockDim.x * gridDim.x) {
DataType value = values_ptr[input_nz];
IndexType *index = indices_ptr + input_nz * 2;
IndexType idx_i = index[*split_dim_ptr];
IndexType block;
for (IndexType i = 0; i < num_split; i++) {
if (idx_i < d_block_ptr[i + 1] && idx_i >= d_block_ptr[i]) {
block = i;
}
}
int idx = atomicAdd(&sum_count_ptr[block], 1);
if (*split_dim_ptr == 0) {
y_indices_ptr[block][idx * 2] = index[0] - d_block_ptr[block];
y_indices_ptr[block][idx * 2 + 1] = index[1];
} else {
y_indices_ptr[block][idx * 2] = index[0];
y_indices_ptr[block][idx * 2 + 1] = index[1] - d_block_ptr[block];
}
y_values_ptr[block][idx] = value;
}
}
template <typename DataType, typename IndexType>
CUDA_LIB_EXPORT void SparseSplit(IndexType *split_dim_ptr, IndexType *indices_ptr, DataType *values_ptr,
IndexType *shape_ptr, IndexType num_split, IndexType **y_indices_ptr,
DataType **y_values_ptr, IndexType *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, IndexType *d_block_ptr, cudaStream_t cuda_stream) {
SparseSplitKernel<<<GET_BLOCKS(input_nnz_), GET_THREADS, 0, cuda_stream>>>(
split_dim_ptr, indices_ptr, values_ptr, shape_ptr, num_split, y_indices_ptr, y_values_ptr, out_shape_ptr,
sum_count_ptr, input_nnz_, num_dim_, d_block_ptr);
}
template CUDA_LIB_EXPORT void SparseSplit<uint8_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
uint8_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, uint8_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<uint16_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
uint16_t *values_ptr, int64_t *shape_ptr,
int64_t num_split, int64_t **y_indices_ptr,
uint16_t **y_values_ptr, int64_t *out_shape_ptr,
int *sum_count_ptr, size_t input_nnz_, size_t num_dim_,
int64_t *d_block_ptr, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int64_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int64_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int64_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int32_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int32_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int32_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int16_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int16_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int16_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<int8_t, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
int8_t *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, int8_t **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<double, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
double *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, double **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<float, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr,
float *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, float **y_values_ptr,
int64_t *out_shape_ptr, int *sum_count_ptr, size_t input_nnz_,
size_t num_dim_, int64_t *d_block_ptr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<half, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr, half *values_ptr,
int64_t *shape_ptr, int64_t num_split, int64_t **y_indices_ptr,
half **y_values_ptr, int64_t *out_shape_ptr,
int *sum_count_ptr, size_t input_nnz_, size_t num_dim_,
int64_t *d_block_ptr, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<bool, int64_t>(int64_t *split_dim_ptr, int64_t *indices_ptr, bool *values_ptr,
int64_t *shape_ptr, int64_t num_split, int64_t **y_indices_ptr,
bool **y_values_ptr, int64_t *out_shape_ptr,
int *sum_count_ptr, size_t input_nnz_, size_t num_dim_,
int64_t *d_block_ptr, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<Complex<float>, int64_t>(
int64_t *split_dim_ptr, int64_t *indices_ptr, Complex<float> *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, Complex<float> **y_values_ptr, int64_t *out_shape_ptr, int *sum_count_ptr, size_t input_nnz_,
size_t num_dim_, int64_t *d_block_ptr, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void SparseSplit<Complex<double>, int64_t>(
int64_t *split_dim_ptr, int64_t *indices_ptr, Complex<double> *values_ptr, int64_t *shape_ptr, int64_t num_split,
int64_t **y_indices_ptr, Complex<double> **y_values_ptr, int64_t *out_shape_ptr, int *sum_count_ptr,
size_t input_nnz_, size_t num_dim_, int64_t *d_block_ptr, cudaStream_t cuda_stream);
|
19d7f5577d4076b9c68c32e3293bf1b01a8e6dd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef CUDA_PRECISION_CU
#define CUDA_PRECISION_CU
//routines that call cuda kernels, i.e. driver-routines for cuda calls
//routines with prefix cu as in cuRoutine can be called directly from host code.
#ifdef DEBUG
const bool WTRACE=true;
#else
const bool WTRACE=false;
#endif
#ifdef DEBUG2
const bool TRACE=true;
#else
const bool TRACE=false;
#endif
#include<iostream>
#include<iomanip>
#include "cuda_global.h"
#include "cuda_precision.h"
#include "cuda_precision_kernels.h"
#include <qd/dd_real.h>
#include <qd/qd_real.h>
#include <qd/fpu.h>
struct dd_cmplx{dd_real x; dd_real y; dd_cmplx(double xd=0.,double yd=0.){x=dd_real(xd); y=dd_real(yd);}};
struct qd_cmplx{qd_real x; qd_real y; qd_cmplx(double xd=0.,double yd=0.){x=qd_real(xd); y=qd_real(yd);}};
#ifdef DOUBLEDOUBLE
#define realformat dd_real
#define cmplxformat dd_cmplx
#define todouble to_double
#define makereal dd_real
#elif defined QUADDOUBLE
#define realformat qd_real
#define cmplxformat qd_cmplx
#define todouble to_double
#define makereal qd_real
#else
#define realformat double
#define cmplxformat cuda_cmplx
#define todouble double
#define makereal double
#endif
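// Depending on the build flag (DOUBLEDOUBLE, QUADDOUBLE, or neither), realformat/cmplxformat alias
// dd_real/dd_cmplx, qd_real/qd_cmplx, or plain double/cuda_cmplx, and todouble/makereal select the
// matching conversions; all routines below are written against these aliases.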
const int sizeofreal=sizeof(realformat);
const int sizeofcmplx=sizeof(cmplxformat);
/*
const int NTHREADSPERBLOCK=256;
const bool REORTHOGONALIZE=true;
*/
int cuda_mat_vec_multiply_cmplx(
const int M,const int Nc,const int* indices ,
const void* data , const void* x, void* y)
{
if(WTRACE) cout << "Starting cuda_mat_vec_multiply_cmplx" << endl;
int b = (M + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
const cuda_cmplx* datap=reinterpret_cast<const cuda_cmplx*>(data);
const cuda_cmplx* xp=reinterpret_cast<const cuda_cmplx*>(x);
cuda_cmplx* yp=reinterpret_cast<cuda_cmplx*>(y);
hipLaunchKernelGGL(( cuda_kernel_mat_vec_multiply_cmplx), dim3(b), dim3(THREADS_PER_BLOCK), 0, 0, M,Nc,indices,datap,xp,yp);
if(WTRACE) cout << "Done with cuda_mat_vec_multiply_cmplx" << endl;
return hipSuccess;
}
int cuda_real_Zmaxpy(const int N,const void* q,void* d_x,const void* d_y)
{
if(WTRACE) cout << "Starting cuda_real_Zmaxpy" << endl;
void* d_q=NULL;
AllocateSpaceOnDevice(sizeofreal,&d_q);
UploadToDevice(q,sizeofreal,&d_q);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
hipLaunchKernelGGL(( cuda_kernel_real_Zmaxpy), dim3(b), dim3(THREADS_PER_BLOCK), 0, 0, N,d_q,d_x,d_y);
FreeMemoryOnDevice(&d_q);
if(WTRACE) cout << "Done with cuda_real_Zmaxpy" << endl;
return hipSuccess;
}
int cuda_cmplx_Zmaxpy(const int N,const void* q,void* d_x,const void* d_y)
{
if(WTRACE) cout << "Starting cuda_cmplx_Zmaxpy" << endl;
void* d_q=NULL;
AllocateSpaceOnDevice(sizeofcmplx,&d_q);
UploadToDevice(q,sizeofcmplx,&d_q);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
hipLaunchKernelGGL(( cuda_kernel_cmplx_Zmaxpy), dim3(b), dim3(THREADS_PER_BLOCK), 0, 0, N,d_q,d_x,d_y);
FreeMemoryOnDevice(&d_q);
if(WTRACE) cout << "Done with cuda_cmplx_Zmaxpy" << endl;
return hipSuccess;
}
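// The dot-product and norm routines below share one pattern: every CUDA block produces a partial
// result in the extended-precision format, the BLOCKS partials are copied back to the host, and the
// final reduction runs between fpu_fix_start/fpu_fix_end, which the qd library needs on x86 for
// correct double-double/quad-double arithmetic.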
int cuda_realdotproduct_real(const int N,const void* d_x,const void* d_y,void* ans)
{
if(WTRACE) cout << "Starting cuda_realdotproduct_cmplx" << endl;
hipError_t err;
realformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=realformat(0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofreal*BLOCKS,&d_c);
UploadToDevice(c,sizeofreal*BLOCKS,&d_c);
hipLaunchKernelGGL(( cuda_kernel_realdotproduct_real), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, d_x, d_y, d_c);
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofreal*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0] += c[i];}
fpu_fix_end(&old_cw);
realformat* ansp = reinterpret_cast<realformat*>(ans);
*ansp=c[0];
if(WTRACE) cout << "Done with cuda_realdotproduct_cmplx" << endl;
return hipSuccess;
}
int cuda_realdotproduct_cmplx(const int N,const void* d_x,const void* d_y,void* ans)
{
if(WTRACE) cout << "Starting cuda_realdotproduct_cmplx" << endl;
hipError_t err;
realformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=realformat(0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofreal*BLOCKS,&d_c);
UploadToDevice(c,sizeofreal*BLOCKS,&d_c);
hipLaunchKernelGGL(( cuda_kernel_realdotproduct_cmplx), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, d_x, d_y, d_c);
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofreal*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0] += c[i];}
fpu_fix_end(&old_cw);
realformat* ansp = reinterpret_cast<realformat*>(ans);
*ansp=c[0];
if(WTRACE) cout << "Done with cuda_realdotproduct_cmplx" << endl;
return hipSuccess;
}
int cuda_cmplxdotproduct_cmplx(const int N,const void* d_x,const void* d_y,void* ans)
{
if(WTRACE) cout << "Starting cuda_cmplxdotproduct_cmplx" << endl;
hipError_t err;
cmplxformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=cmplxformat(0.,0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofcmplx*BLOCKS,&d_c);
UploadToDevice(c,sizeofcmplx*BLOCKS,&d_c);
hipLaunchKernelGGL(( cuda_kernel_cmplxdotproduct_cmplx), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, d_x, d_y, d_c);
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofcmplx*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0].x += c[i].x; c[0].y += c[i].y;}
fpu_fix_end(&old_cw);
cmplxformat* ansp = reinterpret_cast<cmplxformat*>(ans);
*ansp=c[0];
if(WTRACE) cout << "Done with cuda_cmplxdotproduct_cmplx" << endl;
return hipSuccess;
}
// computes the dot product between two real vectors
int cuRealDotProductReal(const int N,const double* x,const double* y,double* res)
{
void* d_x=NULL;
AllocateSpaceOnDevice(N*sizeofreal,&d_x);
UploadToDeviceAndExpandReal(x,N,&d_x);
void* d_y=NULL;
AllocateSpaceOnDevice(N*sizeofreal,&d_y);
UploadToDeviceAndExpandReal(y,N,&d_y);
realformat rp =realformat(0.);
cuda_realdotproduct_real(N,d_x,d_y,&rp);
*res=todouble(rp);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
// computes the real part of the dot product between two complex vectors
int cuRealDotProductCmplx(const int N,const void* x,const void* y,double* res)
{
void* d_x=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_x);
UploadToDeviceAndExpandCmplx(x,N,&d_x);
void* d_y=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_y);
UploadToDeviceAndExpandCmplx(y,N,&d_y);
realformat rp =realformat(0.);
cuda_realdotproduct_cmplx(N,d_x,d_y,&rp);
*res=todouble(rp);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
int cuConvertToDouble(void* p,double* d)
{
  realformat* pval=reinterpret_cast<realformat*>(p);
  *d = todouble(*pval);
  return 0;
}
// computes the dot product between two complex vectors
int cuCmplxDotProductCmplx(int N,void* x,void* y,void* res)
{
void* d_x=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_x);
UploadToDeviceAndExpandCmplx(x,N,&d_x);
void* d_y=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_y);
UploadToDeviceAndExpandCmplx(y,N,&d_y);
cmplxformat rp =cmplxformat(0.,0.);
cuda_cmplxdotproduct_cmplx(N,d_x,d_y,&rp);
cuda_cmplx* res_ptr=reinterpret_cast<cuda_cmplx*>(res);
(*res_ptr).x=todouble(rp.x);
(*res_ptr).y=todouble(rp.y);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
int cuda_norm_cmplx(const int N,const void* d_x,void* ans)
{
if(WTRACE) cout << "Starting cuda_norm_cmplx" << endl;
hipError_t err;
realformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=realformat(0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofreal*BLOCKS,&d_c);
UploadToDevice(c,sizeofreal*BLOCKS,&d_c);
hipLaunchKernelGGL(( cuda_kernel_realdotproduct_cmplx), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, d_x, d_x, d_c);
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofreal*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0] += c[i];}
fpu_fix_end(&old_cw);
realformat* ansp = reinterpret_cast<realformat*>(ans);
*ansp=sqrt(c[0]);
if(WTRACE) cout << "Done with cuda_norm_cmplx" << endl;
return hipSuccess;
}
// divide the complex vector x by a real number: x=x/q
int cuda_Zdinvscal(int N,void* q,void* d_x)
{
if(WTRACE) cout << "Starting cuda_Zdinvscal " << endl;
realformat* qp= reinterpret_cast<realformat*>(q);
realformat s= inv(*qp); // s= 1/q taking the inverse
void* sp=reinterpret_cast<void*>(&s);
void* d_s=NULL;
AllocateSpaceOnDevice(sizeofreal,&d_s);
UploadToDevice(sp,sizeofreal,&d_s);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
hipLaunchKernelGGL(( cuda_kernel_Zdscal), dim3(b), dim3(THREADS_PER_BLOCK), 0, 0, N,d_s,d_x);
FreeMemoryOnDevice(&d_s);
if(WTRACE) cout << "Done with cuda_Zdinvscal" << endl;
return hipSuccess;
}
// multiply the complex vector x by a real number: x=x*q
int cuda_Zdscal(int N,void* q,void* d_x)
{
if(WTRACE) cout << "Starting cuda_Zdscal " << endl;
void* d_s=NULL;
AllocateSpaceOnDevice(sizeofreal,&d_s);
UploadToDevice(q,sizeofreal,&d_s);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
hipLaunchKernelGGL(( cuda_kernel_Zdscal), dim3(b), dim3(THREADS_PER_BLOCK), 0, 0, N,d_s,d_x);
FreeMemoryOnDevice(&d_s);
if(WTRACE) cout << "Done with cuda_Zdscal" << endl;
return hipSuccess;
}
int cuda_Normalize(const int N,void* d_x,double* ans)
{
if(WTRACE) cout << "Starting cuda_Normalize" << endl;
realformat norm=makereal(0.);
  void* norm_ptr=reinterpret_cast<void*>(&norm);
  cuda_norm_cmplx(N,d_x,norm_ptr);
  cuda_Zdinvscal(N,norm_ptr,d_x);
  *ans=todouble(norm);
  return hipSuccess;
}
/*
void* GetPrecPtrCmplx(void* d_p,const int N)
{
cmplxformat* ptr=reinterpret_cast<cmplxformat*>(d_p);
return reinterpret_cast<void*>(&ptr[N]);
}
void* GetPrecPtrReal(void* d_p,const int N)
{
realformat* ptr=reinterpret_cast<realformat*>(d_p);
return reinterpret_cast<void*>(&ptr[N]);
}
*/
int AllocateSpaceOnDeviceCmplx(const int N,void** d_p)
{
  AllocateSpaceOnDevice(N*sizeofcmplx,d_p); // match the (size, void**) argument order used elsewhere
  return hipSuccess;
}
int AllocateSpaceOnDeviceReal(const int N,void** d_p)
{
  AllocateSpaceOnDevice(N*sizeofreal,d_p);
  return hipSuccess;
}
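// Host<->device transfer helpers: host data (double / cuda_cmplx) is widened element by element to
// realformat/cmplxformat before upload and narrowed back to double after download, so device code
// always works on the extended-precision layout.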
int UploadToDeviceAndExpandCmplx(const void* p,const int N,void** d_p)
{
const cuda_cmplx* pptr=reinterpret_cast<const cuda_cmplx*>(p);
cmplxformat cp[N];
for(int i=0; i<N; i++){ cp[i]=cmplxformat(pptr[i].x,pptr[i].y);}
void* cp_ptr=reinterpret_cast<void*>(cp);
UploadToDevice(cp_ptr,N*sizeofcmplx,d_p);
return hipSuccess;
}
int UploadToDeviceAndExpandReal(const double* p,const int N,void** d_p)
{
realformat cp[N];
for(int i=0; i<N; i++){ cp[i]=realformat(p[i]);}
void* cp_ptr=reinterpret_cast<void*>(cp);
UploadToDevice(cp_ptr,N*sizeofreal,d_p);
return hipSuccess;
}
int DownloadFromDeviceAndContractCmplx(void** d_p,int N,void* p)
{
cuda_cmplx* pptr=reinterpret_cast<cuda_cmplx*>(p);
cmplxformat cp[N];
void* cp_ptr=reinterpret_cast<void*>(&cp[0]);
DownloadFromDevice(d_p,N*sizeofcmplx,cp_ptr);
for(int i=0; i<N; i++){ pptr[i]=make_cuDoubleComplex(todouble(cp[i].x),todouble(cp[i].y));}
return hipSuccess;
}
int DownloadFromDeviceAndContractReal(void** d_p,int N,double* p)
{
realformat cp[N];
void* cp_ptr=reinterpret_cast<void*>(&cp[0]);
DownloadFromDevice(d_p,N*sizeofreal,cp_ptr);
for(int i=0; i<N; i++){ p[i]=todouble(cp[i]);}
return hipSuccess;
}
int InspectDeviceCmplx(void** d_p,int N)
{
cuda_cmplx cp[N];
void* cp_ptr=reinterpret_cast<void*>(&cp[0]);
DownloadFromDeviceAndContractCmplx(d_p,N,cp_ptr);
for(int i=0; i<N; i++){cout << cp[i] << endl;}
return hipSuccess;
}
int InspectDeviceReal(void** d_p,int N)
{
double cp[N];
DownloadFromDeviceAndContractReal(d_p,N,&cp[0]);
for(int i=0; i<N; i++){cout << cp[i] << endl;}
return hipSuccess;
}
/*
//*****************************************************************
int cuda_Zaxpy(int N,cuda_cmplx* q,cuda_cmplx* x,cuda_cmplx* y)
{
hipError_t err;
if(WTRACE) cout << "In cuda_Zaxpy" << endl;
cuda_cmplx* d_q=NULL;
AllocateSpaceOnDevice(1,&d_q);
UploadToDevice(q,1,&d_q);
int blockspergrid = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
if(WTRACE) cout << "Launcing cuda_Zaxpy with " << blockspergrid << " blocks" << endl;
#ifdef DOUBLEDOUBLE
cudakernel_Zaxpy_dd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_q,x,y);
#elif QUADDOUBLE
cudakernel_Zaxpy_qd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_q,x,y);
#else
cudakernel_Zaxpy<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_q,x,y);
#endif
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute cuda_Zaxpy (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
FreeMemoryOnDevice(&d_q);
if(WTRACE) cout << "Done with cuda_Zaxpy" << endl;
return hipSuccess;
}
int cuZaxpy(int N,void* q,void* x,void* y)
{
if(WTRACE) cout << "Starting Zaxpy " << endl;
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_cmplx* d_y=NULL;
AllocateSpaceOnDevice(N,&d_y);
cuda_cmplx* y_ptr=reinterpret_cast<cuda_cmplx*>(y);
UploadToDevice(y_ptr,N,&d_y);
cuda_cmplx* q_ptr=reinterpret_cast<cuda_cmplx*>(q);
cuda_Zaxpy(N,q_ptr,d_x,d_y);
DownloadFromDevice(&d_x,N,x_ptr);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
if(WTRACE) cout << "Done with Zaxpy " << endl;
return hipSuccess;
}
int cuda_ComplexDotProduct(int N,cuda_cmplx* x,cuda_cmplx* y,cuda_cmplx* res)
{
if(WTRACE) cout << "Starting cuda_ComplexDotProduct" << endl;
hipError_t err;
#ifdef DOUBLEDOUBLE
dd_real cr[BLOCKS];
dd_real ci[BLOCKS];
for(int i=0; i<BLOCKS; i++){ cr[i]=dd_real(0.); ci[i]=dd_real(0.);}
void* d_cr=NULL;
void* d_ci=NULL;
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real cr[BLOCKS];
qd_real ci[BLOCKS];
for(int i=0; i<BLOCKS; i++){ cr[i]=qd_real(0.); ci[i]=qd_real(0.);}
void* d_cr=NULL;
void* d_ci=NULL;
int sizeofdouble=sizeof(qd_real);
#else
double cr[BLOCKS];
double ci[BLOCKS];
for(int i=0; i<BLOCKS; i++){ cr[i]=0.; ci[i]=0.;}
void* d_cr=NULL;
void* d_ci=NULL;
int sizeofdouble=sizeof(double);
#endif
err = hipMalloc(&d_cr, sizeofdouble * BLOCKS);
err = hipMalloc(&d_ci, sizeofdouble * BLOCKS);
if(err !=hipSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (ComplexDotProduct)\n");
return EXIT_FAILURE;
}
err=hipMemcpy(d_cr,&cr[0], sizeofdouble*BLOCKS, hipMemcpyHostToDevice);
err=hipMemcpy(d_ci,&ci[0], sizeofdouble*BLOCKS, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_dotproduct_cmplx_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, x, y, d_cr, d_ci);
#elif QUADDOUBLE
cudakernel_dotproduct_cmplx_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, x, y, d_cr, d_ci);
#else
cudakernel_dotproduct_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, x, y, d_cr, d_ci);
#endif
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err=hipMemcpy(&cr[0],d_cr,sizeofdouble*BLOCKS, hipMemcpyDeviceToHost);
err=hipMemcpy(&ci[0],d_ci,sizeofdouble*BLOCKS, hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err=hipFree(d_cr);
err=hipFree(d_ci);
if ( err != hipSuccess)
{
fprintf(stderr, "Failed to free memory on device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++)
{
cr[0] += cr[i];
ci[0] += ci[i];
}
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=make_cuDoubleComplex(to_double(cr[0]),to_double(ci[0]));
#else
*res=make_cuDoubleComplex(cr[0],ci[0]);
#endif
fpu_fix_end(&old_cw);
if(WTRACE) cout << "Done with cuda_ComplexDotProduct" << endl;
return hipSuccess;
}
int cuComplexDotProduct(int N,void* x,void* y,void* res)
{
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_cmplx* d_y=NULL;
AllocateSpaceOnDevice(N,&d_y);
cuda_cmplx* y_ptr=reinterpret_cast<cuda_cmplx*>(y);
UploadToDevice(y_ptr,N,&d_y);
cuda_cmplx* res_ptr=reinterpret_cast<cuda_cmplx*>(res);
cuda_ComplexDotProduct(N,d_x,d_y,res_ptr);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
//***********************************************************************
// cuda norm routines
int cuda_Dznrm2(int N,cuda_cmplx* d_x,double* res)
{
if(WTRACE) cout << "Starting cuda_Dznrm2" << endl;
hipError_t err;
#ifdef DOUBLEDOUBLE
dd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=dd_real(0.);}
void* d_c=NULL;
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=qd_real(0.);}
void* d_c=NULL;
int sizeofdouble=sizeof(qd_real);
#else
double c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=0.;}
void* d_c=NULL;
int sizeofdouble=sizeof(double);
#endif
err = hipMalloc(&d_c, sizeofdouble * BLOCKS);
if(err !=hipSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (ComplexDotProduct)\n");
return EXIT_FAILURE;
}
err=hipMemcpy(d_c,&c[0], sizeofdouble*BLOCKS, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_norm_cmplx_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#elif QUADDOUBLE
cudakernel_norm_cmplx_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#else
cudakernel_norm_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#endif
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err=hipMemcpy(&c[0],d_c,sizeofdouble*BLOCKS, hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err=hipFree(d_c);
if ( err != hipSuccess)
{
fprintf(stderr, "Failed to free memory on device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++)
{
c[0] += c[i];
}
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=to_double(sqrt(c[0]));
#else
*res=sqrt(c[0]);
#endif
fpu_fix_end(&old_cw);
if(WTRACE) cout << "Done with cuda_Dznrm2" << endl;
return hipSuccess;
}
int cuDznrm2(int N,void* x,double* res)
{
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_Dznrm2(N,d_x,res);
FreeMemoryOnDevice(&d_x);
return 0;
}
//*************************************************************************************
// Normalize a complex vector
int cuda_Normalize(int N,cuda_cmplx* d_x,double* res)
{
if(WTRACE) cout << "Starting cuda_Dznrm2" << endl;
hipError_t err;
#ifdef DOUBLEDOUBLE
dd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=dd_real(0.);}
void* d_c=NULL;
dd_real norm=dd_real(0.);
dd_real invnorm=dd_real(0.);
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=qd_real(0.);}
void* d_c=NULL;
qd_real norm=qd_real(0.);
qd_real invnorm=qd_real(0.);
int sizeofdouble=sizeof(qd_real);
#else
double c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=0.;}
void* d_c=NULL;
double norm=0.;
double invnorm=0.;
int sizeofdouble=sizeof(double);
#endif
err = hipMalloc(&d_c, sizeofdouble * BLOCKS);
if(err !=hipSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (ComplexDotProduct)\n");
return EXIT_FAILURE;
}
err=hipMemcpy(d_c,&c[0], sizeofdouble*BLOCKS, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_norm_cmplx_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#elif QUADDOUBLE
cudakernel_norm_cmplx_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#else
cudakernel_norm_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#endif
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err=hipMemcpy(&c[0],d_c,sizeofdouble*BLOCKS, hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++)
{
c[0] += c[i];
}
norm=sqrt(c[0]);
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=to_double(norm);
#else
*res=norm;
#endif
// use c[0] to store the inverse norm and then multiply by c[0]
c[0]=inv(norm);
fpu_fix_end(&old_cw);
err=hipMemcpy(d_c,&c[0], sizeofdouble*1, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int blockspergrid = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
#ifdef DOUBLEDOUBLE
cudakernel_Zddscal_dd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_c,d_x);
#elif QUADDOUBLE
cudakernel_Zqdscal_qd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_c,d_x);
#else
cudakernel_Zdscal<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_c,d_x);
#endif
err=hipFree(d_c);
if ( err != hipSuccess)
{
fprintf(stderr, "Failed to free memory on device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
if(WTRACE) cout << "Done with cuda_norm_driver" << endl;
return hipSuccess;
}
int cuNormalize(int N,void* x,double* res)
{
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_Normalize(N,d_x,res);
DownloadFromDevice(&d_x,N,x_ptr);
FreeMemoryOnDevice(&d_x);
return 0;
}
//*********************** the double dot product: *************************************
int cuda_DoubleDotProduct(int N,double* d_x,double* d_y,double* res)
{
hipError_t err;
#ifdef DOUBLEDOUBLE
dd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++) c[i]=dd_real(0.);
void* d_c=NULL;
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++) c[i]=qd_real(0.);
void* d_c=NULL;
int sizeofdouble=sizeof(qd_real);
#else
double c[BLOCKS];
for(int i=0; i<BLOCKS; i++) c[i]=0.;
void* d_c=NULL;
int sizeofdouble=sizeof(double);
#endif
err = hipMalloc(&d_c, sizeofdouble * BLOCKS);
if(err !=hipSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (DoubleDotProduct)\n");
return EXIT_FAILURE;
}
err=hipMemcpy(d_c,&c[0], sizeofdouble*BLOCKS, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_dotproduct_double_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
#elif QUADDOUBLE
cudakernel_dotproduct_double_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
#else
cudakernel_dotproduct_double<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
#endif
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to execute cuda_dotproduct_double_dd (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err=hipMemcpy(&c[0],d_c,sizeofdouble*BLOCKS, hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err=hipFree(d_c);
if ( err != hipSuccess)
{
fprintf(stderr, "Failed to free memory d_c on device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){ c[0] += c[i];}
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=to_double(c[0]);
#else
*res=c[0];
#endif
fpu_fix_end(&old_cw);
return hipSuccess;
}
int cuDoubleDotProduct(int N,double* x,double* y,double* res)
{
double* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
UploadToDevice(x,N,&d_x);
double* d_y=NULL;
AllocateSpaceOnDevice(N,&d_y);
UploadToDevice(y,N,&d_y);
cuda_DoubleDotProduct(N,d_x,d_y,res);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return hipSuccess;
}
*/
#endif // CUDA_PRECISION_DRIVERS_CU
| 19d7f5577d4076b9c68c32e3293bf1b01a8e6dd2.cu | #ifndef CUDA_PRECISION_CU
#define CUDA_PRECISION_CU
//routines that calls cuda_kernels, i.e. driver-routines for cuda calls
//routines with prefix cu as in cuRoutine can be called directly from host code.
#ifdef DEBUG
const bool WTRACE=true;
#else
const bool WTRACE=false;
#endif
#ifdef DEBUG2
const bool TRACE=true;
#else
const bool TRACE=false;
#endif
#include<iostream>
#include<iomanip>
#include "cuda_global.h"
#include "cuda_precision.h"
#include "cuda_precision_kernels.h"
#include <qd/dd_real.h>
#include <qd/qd_real.h>
#include <qd/fpu.h>
struct dd_cmplx{dd_real x; dd_real y; dd_cmplx(double xd=0.,double yd=0.){x=dd_real(xd); y=dd_real(yd);}};
struct qd_cmplx{qd_real x; qd_real y; qd_cmplx(double xd=0.,double yd=0.){x=qd_real(xd); y=qd_real(yd);}};
#ifdef DOUBLEDOUBLE
#define realformat dd_real
#define cmplxformat dd_cmplx
#define todouble to_double
#define makereal dd_real
#elif defined QUADDOUBLE
#define realformat qd_real
#define cmplxformat qd_cmplx
#define todouble to_double
#define makereal qd_real
#else
#define realformat double
#define cmplxformat cuda_cmplx
#define todouble double
#define makereal double
#endif
const int sizeofreal=sizeof(realformat);
const int sizeofcmplx=sizeof(cmplxformat);
/*
const int NTHREADSPERBLOCK=256;
const bool REORTHOGONALIZE=true;
*/
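// Complex matrix-vector product driver, y = A*x. Judging by the (indices, data)
// arguments, A is stored row-wise with Nc column entries per row (an ELL-like
// packed format); the kernel is launched with roughly one thread per row.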
int cuda_mat_vec_multiply_cmplx(
const int M,const int Nc,const int* indices ,
const void* data , const void* x, void* y)
{
if(WTRACE) cout << "Starting cuda_mat_vec_multiply_cmplx" << endl;
int b = (M + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
const cuda_cmplx* datap=reinterpret_cast<const cuda_cmplx*>(data);
const cuda_cmplx* xp=reinterpret_cast<const cuda_cmplx*>(x);
cuda_cmplx* yp=reinterpret_cast<cuda_cmplx*>(y);
cuda_kernel_mat_vec_multiply_cmplx<<<b, THREADS_PER_BLOCK>>>(M,Nc,indices,datap,xp,yp);
if(WTRACE) cout << "Done with cuda_mat_vec_multiply_cmplx" << endl;
return cudaSuccess;
}
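// Zmaxpy-style updates: the scalar q is first copied to the device so the kernel can
// read it at full extended precision; judging by the kernel names, the destination
// vector d_x is updated in place as x := x - q*y with a real or complex scalar q.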
int cuda_real_Zmaxpy(const int N,const void* q,void* d_x,const void* d_y)
{
if(WTRACE) cout << "Starting cuda_real_Zmaxpy" << endl;
void* d_q=NULL;
AllocateSpaceOnDevice(sizeofreal,&d_q);
UploadToDevice(q,sizeofreal,&d_q);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
cuda_kernel_real_Zmaxpy<<<b, THREADS_PER_BLOCK>>>(N,d_q,d_x,d_y);
FreeMemoryOnDevice(&d_q);
if(WTRACE) cout << "Done with cuda_real_Zmaxpy" << endl;
return cudaSuccess;
}
int cuda_cmplx_Zmaxpy(const int N,const void* q,void* d_x,const void* d_y)
{
if(WTRACE) cout << "Starting cuda_cmplx_Zmaxpy" << endl;
void* d_q=NULL;
AllocateSpaceOnDevice(sizeofcmplx,&d_q);
UploadToDevice(q,sizeofcmplx,&d_q);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
cuda_kernel_cmplx_Zmaxpy<<<b, THREADS_PER_BLOCK>>>(N,d_q,d_x,d_y);
FreeMemoryOnDevice(&d_q);
if(WTRACE) cout << "Done with cuda_cmplx_Zmaxpy" << endl;
return cudaSuccess;
}
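// Reduction drivers: each kernel writes one partial sum per block into d_c, and the
// BLOCKS partial sums are then accumulated on the host. The fpu_fix_start/fpu_fix_end
// calls are the qd library's mechanism for forcing correct x87 rounding while the
// extended-precision host arithmetic runs.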
int cuda_realdotproduct_real(const int N,const void* d_x,const void* d_y,void* ans)
{
if(WTRACE) cout << "Starting cuda_realdotproduct_cmplx" << endl;
cudaError_t err;
realformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=realformat(0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofreal*BLOCKS,&d_c);
UploadToDevice(c,sizeofreal*BLOCKS,&d_c);
cuda_kernel_realdotproduct_real<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofreal*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0] += c[i];}
fpu_fix_end(&old_cw);
realformat* ansp = reinterpret_cast<realformat*>(ans);
*ansp=c[0];
if(WTRACE) cout << "Done with cuda_realdotproduct_cmplx" << endl;
return cudaSuccess;
}
int cuda_realdotproduct_cmplx(const int N,const void* d_x,const void* d_y,void* ans)
{
if(WTRACE) cout << "Starting cuda_realdotproduct_cmplx" << endl;
cudaError_t err;
realformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=realformat(0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofreal*BLOCKS,&d_c);
UploadToDevice(c,sizeofreal*BLOCKS,&d_c);
cuda_kernel_realdotproduct_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofreal*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0] += c[i];}
fpu_fix_end(&old_cw);
realformat* ansp = reinterpret_cast<realformat*>(ans);
*ansp=c[0];
if(WTRACE) cout << "Done with cuda_realdotproduct_cmplx" << endl;
return cudaSuccess;
}
int cuda_cmplxdotproduct_cmplx(const int N,const void* d_x,const void* d_y,void* ans)
{
if(WTRACE) cout << "Starting cuda_cmplxdotproduct_cmplx" << endl;
cudaError_t err;
cmplxformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=cmplxformat(0.,0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofcmplx*BLOCKS,&d_c);
UploadToDevice(c,sizeofcmplx*BLOCKS,&d_c);
cuda_kernel_cmplxdotproduct_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofcmplx*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0].x += c[i].x; c[0].y += c[i].y;}
fpu_fix_end(&old_cw);
cmplxformat* ansp = reinterpret_cast<cmplxformat*>(ans);
*ansp=c[0];
if(WTRACE) cout << "Done with cuda_cmplxdotproduct_cmplx" << endl;
return cudaSuccess;
}
// computes the dot product between two real vectors
int cuRealDotProductReal(const int N,const double* x,const double* y,double* res)
{
void* d_x=NULL;
AllocateSpaceOnDevice(N*sizeofreal,&d_x);
UploadToDeviceAndExpandReal(x,N,&d_x);
void* d_y=NULL;
AllocateSpaceOnDevice(N*sizeofreal,&d_y);
UploadToDeviceAndExpandReal(y,N,&d_y);
realformat rp =realformat(0.);
cuda_realdotproduct_real(N,d_x,d_y,&rp);
*res=todouble(rp);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
// computes the real part of the dot product between two complex vectors
int cuRealDotProductCmplx(const int N,const void* x,const void* y,double* res)
{
void* d_x=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_x);
UploadToDeviceAndExpandCmplx(x,N,&d_x);
void* d_y=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_y);
UploadToDeviceAndExpandCmplx(y,N,&d_y);
realformat rp =realformat(0.);
cuda_realdotproduct_cmplx(N,d_x,d_y,&rp);
*res=todouble(rp);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
int cuConvertToDouble(void* p,double* d)
{
// p points to a single value stored in the extended-precision host format; round it back to double
realformat pval=*reinterpret_cast<realformat*>(p);
*d = todouble(pval);
return 0;
}
// computes the dot product between two complex vectors
int cuCmplxDotProductCmplx(int N,void* x,void* y,void* res)
{
void* d_x=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_x);
UploadToDeviceAndExpandCmplx(x,N,&d_x);
void* d_y=NULL;
AllocateSpaceOnDevice(N*sizeofcmplx,&d_y);
UploadToDeviceAndExpandCmplx(y,N,&d_y);
cmplxformat rp =cmplxformat(0.,0.);
cuda_cmplxdotproduct_cmplx(N,d_x,d_y,&rp);
cuda_cmplx* res_ptr=reinterpret_cast<cuda_cmplx*>(res);
(*res_ptr).x=todouble(rp.x);
(*res_ptr).y=todouble(rp.y);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
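// Euclidean norm of a complex device vector: reuses the real-dot-product kernel with
// x paired against itself and takes the square root of the block-reduced sum at
// extended precision.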
int cuda_norm_cmplx(const int N,const void* d_x,void* ans)
{
if(WTRACE) cout << "Starting cuda_norm_cmplx" << endl;
cudaError_t err;
realformat c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=realformat(0.);}
void* d_c=NULL;
AllocateSpaceOnDevice(sizeofreal*BLOCKS,&d_c);
UploadToDevice(c,sizeofreal*BLOCKS,&d_c);
cuda_kernel_realdotproduct_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_x, d_c);
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
DownloadFromDevice(&d_c,sizeofreal*BLOCKS,c);
FreeMemoryOnDevice(&d_c);
// sum the contribution from all the blocks:
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){c[0] += c[i];}
fpu_fix_end(&old_cw);
realformat* ansp = reinterpret_cast<realformat*>(ans);
*ansp=sqrt(c[0]);
if(WTRACE) cout << "Done with cuda_norm_cmplx" << endl;
return cudaSuccess;
}
// divide the complex vector x by a real number: x=x/q
int cuda_Zdinvscal(int N,void* q,void* d_x)
{
if(WTRACE) cout << "Starting cuda_Zdinvscal " << endl;
realformat* qp= reinterpret_cast<realformat*>(q);
realformat s= inv(*qp); // s= 1/q taking the inverse
void* sp=reinterpret_cast<void*>(&s);
void* d_s=NULL;
AllocateSpaceOnDevice(sizeofreal,&d_s);
UploadToDevice(sp,sizeofreal,&d_s);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
cuda_kernel_Zdscal<<<b, THREADS_PER_BLOCK>>>(N,d_s,d_x);
FreeMemoryOnDevice(&d_s);
if(WTRACE) cout << "Done with cuda_Zdinvscal" << endl;
return cudaSuccess;
}
// multiply the complex vector x by a real number: x=x*q
int cuda_Zdscal(int N,void* q,void* d_x)
{
if(WTRACE) cout << "Starting cuda_Zdscal " << endl;
void* d_s=NULL;
AllocateSpaceOnDevice(sizeofreal,&d_s);
UploadToDevice(q,sizeofreal,&d_s);
int b = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
cuda_kernel_Zdscal<<<b, THREADS_PER_BLOCK>>>(N,d_s,d_x);
FreeMemoryOnDevice(&d_s);
if(WTRACE) cout << "Done with cuda_Zdscal" << endl;
return cudaSuccess;
}
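// Normalize a complex device vector in place: compute its extended-precision norm,
// return it (rounded to double) in *ans, and rescale the vector by 1/norm on the device.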
int cuda_Normalize(const int N,void* d_x,double* ans)
{
if(WTRACE) cout << "Starting cuda_Normalize" << endl;
realformat norm=makereal(0.);
// pass the address of norm so that cuda_norm_cmplx can write the result into it
void* norm_ptr=reinterpret_cast<void*>(&norm);
cuda_norm_cmplx(N,d_x,norm_ptr);
cuda_Zdinvscal(N,norm_ptr,d_x);
*ans=todouble(norm);
return cudaSuccess;
}
/*
void* GetPrecPtrCmplx(void* d_p,const int N)
{
cmplxformat* ptr=reinterpret_cast<cmplxformat*>(d_p);
return reinterpret_cast<void*>(&ptr[N]);
}
void* GetPrecPtrReal(void* d_p,const int N)
{
realformat* ptr=reinterpret_cast<realformat*>(d_p);
return reinterpret_cast<void*>(&ptr[N]);
}
*/
int AllocateSpaceOnDeviceCmplx(const int N,void** d_p)
{
AllocateSpaceOnDevice(d_p,N*sizeofcmplx);
return cudaSuccess;
}
int AllocateSpaceOnDeviceReal(const int N,void** d_p)
{
AllocateSpaceOnDevice(d_p,N*sizeofreal);
return cudaSuccess;
}
int UploadToDeviceAndExpandCmplx(const void* p,const int N,void** d_p)
{
const cuda_cmplx* pptr=reinterpret_cast<const cuda_cmplx*>(p);
cmplxformat cp[N];
for(int i=0; i<N; i++){ cp[i]=cmplxformat(pptr[i].x,pptr[i].y);}
void* cp_ptr=reinterpret_cast<void*>(cp);
UploadToDevice(cp_ptr,N*sizeofcmplx,d_p);
return cudaSuccess;
}
int UploadToDeviceAndExpandReal(const double* p,const int N,void** d_p)
{
realformat cp[N];
for(int i=0; i<N; i++){ cp[i]=realformat(p[i]);}
void* cp_ptr=reinterpret_cast<void*>(cp);
UploadToDevice(cp_ptr,N*sizeofreal,d_p);
return cudaSuccess;
}
int DownloadFromDeviceAndContractCmplx(void** d_p,int N,void* p)
{
cuda_cmplx* pptr=reinterpret_cast<cuda_cmplx*>(p);
cmplxformat cp[N];
void* cp_ptr=reinterpret_cast<void*>(&cp[0]);
DownloadFromDevice(d_p,N*sizeofcmplx,cp_ptr);
for(int i=0; i<N; i++){ pptr[i]=make_cuDoubleComplex(todouble(cp[i].x),todouble(cp[i].y));}
return cudaSuccess;
}
int DownloadFromDeviceAndContractReal(void** d_p,int N,double* p)
{
realformat cp[N];
void* cp_ptr=reinterpret_cast<void*>(&cp[0]);
DownloadFromDevice(d_p,N*sizeofreal,cp_ptr);
for(int i=0; i<N; i++){ p[i]=todouble(cp[i]);}
return cudaSuccess;
}
int InspectDeviceCmplx(void** d_p,int N)
{
cuda_cmplx cp[N];
void* cp_ptr=reinterpret_cast<void*>(&cp[0]);
DownloadFromDeviceAndContractCmplx(d_p,N,cp_ptr);
for(int i=0; i<N; i++){cout << cp[i] << endl;}
return cudaSuccess;
}
int InspectDeviceReal(void** d_p,int N)
{
double cp[N];
DownloadFromDeviceAndContractReal(d_p,N,&cp[0]);
for(int i=0; i<N; i++){cout << cp[i] << endl;}
return cudaSuccess;
}
/*
//*****************************************************************
int cuda_Zaxpy(int N,cuda_cmplx* q,cuda_cmplx* x,cuda_cmplx* y)
{
cudaError_t err;
if(WTRACE) cout << "In cuda_Zaxpy" << endl;
cuda_cmplx* d_q=NULL;
AllocateSpaceOnDevice(1,&d_q);
UploadToDevice(q,1,&d_q);
int blockspergrid = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
if(WTRACE) cout << "Launcing cuda_Zaxpy with " << blockspergrid << " blocks" << endl;
#ifdef DOUBLEDOUBLE
cudakernel_Zaxpy_dd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_q,x,y);
#elif QUADDOUBLE
cudakernel_Zaxpy_qd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_q,x,y);
#else
cudakernel_Zaxpy<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_q,x,y);
#endif
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute cuda_Zaxpy (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
FreeMemoryOnDevice(&d_q);
if(WTRACE) cout << "Done with cuda_Zaxpy" << endl;
return cudaSuccess;
}
int cuZaxpy(int N,void* q,void* x,void* y)
{
if(WTRACE) cout << "Starting Zaxpy " << endl;
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_cmplx* d_y=NULL;
AllocateSpaceOnDevice(N,&d_y);
cuda_cmplx* y_ptr=reinterpret_cast<cuda_cmplx*>(y);
UploadToDevice(y_ptr,N,&d_y);
cuda_cmplx* q_ptr=reinterpret_cast<cuda_cmplx*>(q);
cuda_Zaxpy(N,q_ptr,d_x,d_y);
DownloadFromDevice(&d_x,N,x_ptr);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
if(WTRACE) cout << "Done with Zaxpy " << endl;
return cudaSuccess;
}
int cuda_ComplexDotProduct(int N,cuda_cmplx* x,cuda_cmplx* y,cuda_cmplx* res)
{
if(WTRACE) cout << "Starting cuda_ComplexDotProduct" << endl;
cudaError_t err;
#ifdef DOUBLEDOUBLE
dd_real cr[BLOCKS];
dd_real ci[BLOCKS];
for(int i=0; i<BLOCKS; i++){ cr[i]=dd_real(0.); ci[i]=dd_real(0.);}
void* d_cr=NULL;
void* d_ci=NULL;
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real cr[BLOCKS];
qd_real ci[BLOCKS];
for(int i=0; i<BLOCKS; i++){ cr[i]=qd_real(0.); ci[i]=qd_real(0.);}
void* d_cr=NULL;
void* d_ci=NULL;
int sizeofdouble=sizeof(qd_real);
#else
double cr[BLOCKS];
double ci[BLOCKS];
for(int i=0; i<BLOCKS; i++){ cr[i]=0.; ci[i]=0.;}
void* d_cr=NULL;
void* d_ci=NULL;
int sizeofdouble=sizeof(double);
#endif
err = cudaMalloc(&d_cr, sizeofdouble * BLOCKS);
err = cudaMalloc(&d_ci, sizeofdouble * BLOCKS);
if(err !=cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (ComplexDotProduct)\n");
return EXIT_FAILURE;
}
err=cudaMemcpy(d_cr,&cr[0], sizeofdouble*BLOCKS, cudaMemcpyHostToDevice);
err=cudaMemcpy(d_ci,&ci[0], sizeofdouble*BLOCKS, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_dotproduct_cmplx_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, x, y, d_cr, d_ci);
#elif QUADDOUBLE
cudakernel_dotproduct_cmplx_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, x, y, d_cr, d_ci);
#else
cudakernel_dotproduct_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, x, y, d_cr, d_ci);
#endif
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaMemcpy(&cr[0],d_cr,sizeofdouble*BLOCKS, cudaMemcpyDeviceToHost);
err=cudaMemcpy(&ci[0],d_ci,sizeofdouble*BLOCKS, cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaFree(d_cr);
err=cudaFree(d_ci);
if ( err != cudaSuccess)
{
fprintf(stderr, "Failed to free memory on device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++)
{
cr[0] += cr[i];
ci[0] += ci[i];
}
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=make_cuDoubleComplex(to_double(cr[0]),to_double(ci[0]));
#else
*res=make_cuDoubleComplex(cr[0],ci[0]);
#endif
fpu_fix_end(&old_cw);
if(WTRACE) cout << "Done with cuda_ComplexDotProduct" << endl;
return cudaSuccess;
}
int cuComplexDotProduct(int N,void* x,void* y,void* res)
{
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_cmplx* d_y=NULL;
AllocateSpaceOnDevice(N,&d_y);
cuda_cmplx* y_ptr=reinterpret_cast<cuda_cmplx*>(y);
UploadToDevice(y_ptr,N,&d_y);
cuda_cmplx* res_ptr=reinterpret_cast<cuda_cmplx*>(res);
cuda_ComplexDotProduct(N,d_x,d_y,res_ptr);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return 0;
}
//***********************************************************************
// cuda norm routines
int cuda_Dznrm2(int N,cuda_cmplx* d_x,double* res)
{
if(WTRACE) cout << "Starting cuda_Dznrm2" << endl;
cudaError_t err;
#ifdef DOUBLEDOUBLE
dd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=dd_real(0.);}
void* d_c=NULL;
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=qd_real(0.);}
void* d_c=NULL;
int sizeofdouble=sizeof(qd_real);
#else
double c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=0.;}
void* d_c=NULL;
int sizeofdouble=sizeof(double);
#endif
err = cudaMalloc(&d_c, sizeofdouble * BLOCKS);
if(err !=cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (ComplexDotProduct)\n");
return EXIT_FAILURE;
}
err=cudaMemcpy(d_c,&c[0], sizeofdouble*BLOCKS, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_norm_cmplx_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#elif QUADDOUBLE
cudakernel_norm_cmplx_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#else
cudakernel_norm_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#endif
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaMemcpy(&c[0],d_c,sizeofdouble*BLOCKS, cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaFree(d_c);
if ( err != cudaSuccess)
{
fprintf(stderr, "Failed to free memory on device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++)
{
c[0] += c[i];
}
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=to_double(sqrt(c[0]));
#else
*res=sqrt(c[0]);
#endif
fpu_fix_end(&old_cw);
if(WTRACE) cout << "Done with cuda_Dznrm2" << endl;
return cudaSuccess;
}
int cuDznrm2(int N,void* x,double* res)
{
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_Dznrm2(N,d_x,res);
FreeMemoryOnDevice(&d_x);
return 0;
}
//*************************************************************************************
// Normalize a complex vector
int cuda_Normalize(int N,cuda_cmplx* d_x,double* res)
{
if(WTRACE) cout << "Starting cuda_Dznrm2" << endl;
cudaError_t err;
#ifdef DOUBLEDOUBLE
dd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=dd_real(0.);}
void* d_c=NULL;
dd_real norm=dd_real(0.);
dd_real invnorm=dd_real(0.);
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=qd_real(0.);}
void* d_c=NULL;
qd_real norm=qd_real(0.);
qd_real invnorm=qd_real(0.);
int sizeofdouble=sizeof(qd_real);
#else
double c[BLOCKS];
for(int i=0; i<BLOCKS; i++){ c[i]=0.;}
void* d_c=NULL;
double norm=0.;
double invnorm=0.;
int sizeofdouble=sizeof(double);
#endif
err = cudaMalloc(&d_c, sizeofdouble * BLOCKS);
if(err !=cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (ComplexDotProduct)\n");
return EXIT_FAILURE;
}
err=cudaMemcpy(d_c,&c[0], sizeofdouble*BLOCKS, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_norm_cmplx_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#elif QUADDOUBLE
cudakernel_norm_cmplx_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#else
cudakernel_norm_cmplx<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_c);
#endif
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaMemcpy(&c[0],d_c,sizeofdouble*BLOCKS, cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++)
{
c[0] += c[i];
}
norm=sqrt(c[0]);
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=to_double(norm);
#else
*res=norm;
#endif
// use c[0] to store the inverse norm and then multiply by c[0]
c[0]=inv(norm);
fpu_fix_end(&old_cw);
err=cudaMemcpy(d_c,&c[0], sizeofdouble*1, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int blockspergrid = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
#ifdef DOUBLEDOUBLE
cudakernel_Zddscal_dd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_c,d_x);
#elif QUADDOUBLE
cudakernel_Zqdscal_qd<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_c,d_x);
#else
cudakernel_Zdscal<<<blockspergrid, THREADS_PER_BLOCK>>>(N,d_c,d_x);
#endif
err=cudaFree(d_c);
if ( err != cudaSuccess)
{
fprintf(stderr, "Failed to free memory on device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
if(WTRACE) cout << "Done with cuda_norm_driver" << endl;
return cudaSuccess;
}
int cuNormalize(int N,void* x,double* res)
{
cuda_cmplx* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
cuda_cmplx* x_ptr=reinterpret_cast<cuda_cmplx*>(x);
UploadToDevice(x_ptr,N,&d_x);
cuda_Normalize(N,d_x,res);
DownloadFromDevice(&d_x,N,x_ptr);
FreeMemoryOnDevice(&d_x);
return 0;
}
//*********************** the double dot product: *************************************
int cuda_DoubleDotProduct(int N,double* d_x,double* d_y,double* res)
{
cudaError_t err;
#ifdef DOUBLEDOUBLE
dd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++) c[i]=dd_real(0.);
void* d_c=NULL;
int sizeofdouble=sizeof(dd_real);
#elif QUADDOUBLE
qd_real c[BLOCKS];
for(int i=0; i<BLOCKS; i++) c[i]=qd_real(0.);
void* d_c=NULL;
int sizeofdouble=sizeof(qd_real);
#else
double c[BLOCKS];
for(int i=0; i<BLOCKS; i++) c[i]=0.;
void* d_c=NULL;
int sizeofdouble=sizeof(double);
#endif
err = cudaMalloc(&d_c, sizeofdouble * BLOCKS);
if(err !=cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocatioNerror (DoubleDotProduct)\n");
return EXIT_FAILURE;
}
err=cudaMemcpy(d_c,&c[0], sizeofdouble*BLOCKS, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef DOUBLEDOUBLE
cudakernel_dotproduct_double_dd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
#elif QUADDOUBLE
cudakernel_dotproduct_double_qd<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
#else
cudakernel_dotproduct_double<<<BLOCKS, THREADS_PER_BLOCK>>>(N, d_x, d_y, d_c);
#endif
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to execute cuda_dotproduct_double_dd (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaMemcpy(&c[0],d_c,sizeofdouble*BLOCKS, cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err=cudaFree(d_c);
if ( err != cudaSuccess)
{
fprintf(stderr, "Failed to free memory d_c on device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
unsigned int old_cw;
fpu_fix_start(&old_cw);
for(int i = 1; i < BLOCKS; i++){ c[0] += c[i];}
#if defined DOUBLEDOUBLE || defined QUADDOUBLE
*res=to_double(c[0]);
#else
*res=c[0];
#endif
fpu_fix_end(&old_cw);
return cudaSuccess;
}
int cuDoubleDotProduct(int N,double* x,double* y,double* res)
{
double* d_x=NULL;
AllocateSpaceOnDevice(N,&d_x);
UploadToDevice(x,N,&d_x);
double* d_y=NULL;
AllocateSpaceOnDevice(N,&d_y);
UploadToDevice(y,N,&d_y);
cuda_DoubleDotProduct(N,d_x,d_y,res);
FreeMemoryOnDevice(&d_x);
FreeMemoryOnDevice(&d_y);
return cudaSuccess;
}
*/
#endif // CUDA_PRECISION_DRIVERS_CU
|
fcb1ae256859187a210847862a849fe1c95dbb64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudaimgproc.hpp"
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudafilters.hpp"
using namespace std;
using namespace cv;
using namespace cv::cuda;
const int BLOCK_SIZE = 256;
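// Grid-stride loop over every pixel: a thread writes 255 to the mask where all three
// channels of its pixel lie inside the [lb,ub] range of the corresponding channel,
// and 0 otherwise.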
__global__ void in_range_worker(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStepSzb dst, int lb0, int ub0, int lb1, int ub1, int lb2, int ub2)
{
const int n = src.rows*src.cols;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int x = 0, y = 0;
for (int i = index; i < n; i += stride)
{
x = i % src.cols;
y = i / src.cols;
uchar3 v = src(y, x);
dst(y, x) = (v.x >= lb0 && v.x <= ub0 && v.y >= lb1 && v.y <= ub1 && v.z >= lb2 && v.z <= ub2)*255;
}
}
void in_range(cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst, const cv::Scalar &lower, const cv::Scalar &upper)
{
const int n = src.rows*src.cols;
const int num_blocks = (n + BLOCK_SIZE - 1)/BLOCK_SIZE;
hipLaunchKernelGGL(( in_range_worker), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, src, dst, lower[0], upper[0], lower[1], upper[1], lower[2], upper[2]);
hipDeviceSynchronize();
} | fcb1ae256859187a210847862a849fe1c95dbb64.cu | #include <iostream>
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudaimgproc.hpp"
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudafilters.hpp"
using namespace std;
using namespace cv;
using namespace cv::cuda;
const int BLOCK_SIZE = 256;
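// Grid-stride loop over every pixel: a thread writes 255 to the mask where all three
// channels of its pixel lie inside the [lb,ub] range of the corresponding channel,
// and 0 otherwise.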
__global__ void in_range_worker(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStepSzb dst, int lb0, int ub0, int lb1, int ub1, int lb2, int ub2)
{
const int n = src.rows*src.cols;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int x = 0, y = 0;
for (int i = index; i < n; i += stride)
{
x = i % src.cols;
y = i / src.cols;
uchar3 v = src(y, x);
dst(y, x) = (v.x >= lb0 && v.x <= ub0 && v.y >= lb1 && v.y <= ub1 && v.z >= lb2 && v.z <= ub2)*255;
}
}
void in_range(cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst, const cv::Scalar &lower, const cv::Scalar &upper)
{
const int n = src.rows*src.cols;
const int num_blocks = (n + BLOCK_SIZE - 1)/BLOCK_SIZE;
in_range_worker<<<num_blocks, BLOCK_SIZE>>>(src, dst, lower[0], upper[0], lower[1], upper[1], lower[2], upper[2]);
cudaDeviceSynchronize();
} |
32237dc0c25625d386b54ddb6bb0f642f977d02a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
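// Naive GEMM: one thread per element of C, computing C = alpha*A*B + beta*C for
// row-major A (m x k), B (k x n) and C (m x n). The scalars m, n, k, alpha and beta
// are passed by device pointer.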
extern "C" { __global__ void dgemm(double *A, double *B, double *C, int *m, int *n, int *k, double *alpha, double *beta){
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
if(ROW<*m && COL<*n){
double sum = 0;
for(int i = 0;i<*k;i++)
{
sum+=(*alpha) * A[ROW * (*k) + i] * B[i*(*n)+COL];
}
C[ROW*(*n)+COL] = sum + (*beta) * C[ROW*(*n)+COL];
}
}
} | 32237dc0c25625d386b54ddb6bb0f642f977d02a.cu | // Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
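// Naive GEMM: one thread per element of C, computing C = alpha*A*B + beta*C for
// row-major A (m x k), B (k x n) and C (m x n). The scalars m, n, k, alpha and beta
// are passed by device pointer.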
extern "C" { __global__ void dgemm(double *A, double *B, double *C, int *m, int *n, int *k, double *alpha, double *beta){
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
if(ROW<*m && COL<*n){
double sum = 0;
for(int i = 0;i<*k;i++)
{
sum+=(*alpha) * A[ROW * (*k) + i] * B[i*(*n)+COL];
}
C[ROW*(*n)+COL] = sum + (*beta) * C[ROW*(*n)+COL];
}
}
} |
8b5872792a45bfad806fe8e5f5a5e4c7500b77ab.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <mma.h>
using namespace nvcuda;
#include "tzgemm_mix.cu"
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#define checkKernelErrors(expr) \
do \
{ \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) \
{ \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
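// Benchmark driver: runs the persistent-kernel tzgemm and a GemmEx reference from the
// vendor BLAS library on the same half-precision inputs, times both with events, and
// compares the single-precision outputs element by element.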
int main(int argc, char* argv[]) {
int wmma_iter = 1;
int M_INPUT = 16 * 32 * 8;
int N_INPUT = 16 * 4 * 48;
int K_INPUT = 16 * 4 * 12;
if (argc == 5) {
wmma_iter = atoi(argv[1]);
M_INPUT = atoi(argv[2]);
N_INPUT = atoi(argv[3]);
K_INPUT = atoi(argv[4]);
}
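// Clamp the problem sizes to multiples of 64 (rounding down, with a minimum of 64),
// presumably so the matrices tile evenly into the WMMA fragments and block tiles
// used by the kernel.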
int M_GLOBAL = (M_INPUT < 64) ? 64 : (M_INPUT / 64) * 64;
int N_GLOBAL = (N_INPUT < 64) ? 64 : (N_INPUT / 64) * 64;
int K_GLOBAL = (K_INPUT < 64) ? 64 : (K_INPUT / 64) * 64;
int M_TILES = M_GLOBAL / WMMA_M;
int N_TILES = N_GLOBAL / WMMA_N;
int K_TILES = K_GLOBAL / WMMA_K;
float kernel_time;
hiprandGenerator_t gen;
hipEvent_t startKERNEL;
hipEvent_t stopKERNEL;
cudaErrCheck(hipEventCreate(&startKERNEL));
cudaErrCheck(hipEventCreate(&stopKERNEL));
// wmma variables
// ----------------------------------------------------------------------------------------------------------------------
float *ori_host_A = NULL;
float *ori_host_B = NULL;
float *ori_result_C = NULL;
float *mix_result_C = NULL;
half *ori_wmma_A = NULL;
half *ori_wmma_B = NULL;
float *ori_wmma_C = NULL;
half *mix_wmma_A = NULL;
half *mix_wmma_B = NULL;
float *mix_wmma_C = NULL;
// ori_host_A = (half *)malloc(sizeof(half) * M_GLOBAL * K_GLOBAL);
// ori_host_B = (half *)malloc(sizeof(half) * K_GLOBAL * N_GLOBAL);
ori_result_C = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
mix_result_C = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
// init_host_matrices(ori_host_A, ori_host_B);
curandErrCheck(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&ori_wmma_A), sizeof(half) * M_GLOBAL * K_GLOBAL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&ori_wmma_B), sizeof(half) * N_GLOBAL * K_GLOBAL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&ori_wmma_C), sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&mix_wmma_A), sizeof(half) * M_GLOBAL * K_GLOBAL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&mix_wmma_B), sizeof(half) * N_GLOBAL * K_GLOBAL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&mix_wmma_C), sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&ori_host_A), sizeof(float) * M_GLOBAL * K_GLOBAL));
cudaErrCheck(hipMalloc(reinterpret_cast<void **>(&ori_host_B), sizeof(float) * N_GLOBAL * K_GLOBAL));
curandErrCheck(hiprandGenerateUniform(gen, ori_host_A, M_GLOBAL * K_GLOBAL));
curandErrCheck(hiprandGenerateUniform(gen, ori_host_B, N_GLOBAL * K_GLOBAL));
// hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((M_GLOBAL * K_GLOBAL + 255) / 256), dim3(256) , 0, 0, ori_wmma_A, ori_host_A, M_GLOBAL * K_GLOBAL);
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((N_GLOBAL * K_GLOBAL + 255) / 256), dim3(256) , 0, 0, ori_wmma_B, ori_host_B, N_GLOBAL * K_GLOBAL);
cudaErrCheck(hipMemcpy(mix_wmma_A, ori_wmma_A, sizeof(half) * M_GLOBAL * K_GLOBAL, hipMemcpyDeviceToDevice));
cudaErrCheck(hipMemcpy(mix_wmma_B, ori_wmma_B, sizeof(half) * N_GLOBAL * K_GLOBAL, hipMemcpyDeviceToDevice));
assert(((unsigned long long)ori_wmma_A) % 128 == 0);
assert(((unsigned long long)ori_wmma_B) % 128 == 0);
assert(((unsigned long long)ori_wmma_C) % 128 == 0);
assert(((unsigned long long)mix_wmma_A) % 128 == 0);
assert(((unsigned long long)mix_wmma_B) % 128 == 0);
assert(((unsigned long long)mix_wmma_C) % 128 == 0);
// cudaErrCheck(hipMemcpy(ori_wmma_A, ori_host_A, sizeof(half) * M_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
// cudaErrCheck(hipMemcpy(mix_wmma_A, ori_host_A, sizeof(half) * M_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
// cudaErrCheck(hipMemcpy(ori_wmma_B, ori_host_B, sizeof(half) * N_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
// cudaErrCheck(hipMemcpy(mix_wmma_B, ori_host_B, sizeof(half) * N_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
cudaErrCheck(hipMemset(ori_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(hipMemset(mix_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
// start solo running
// ----------------------------------------------------------------------------------------------------------------------
dim3 wmma_grid;
dim3 wmma_block;
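// Persistent-kernel launch configuration: a fixed grid of 68*4 blocks (presumably
// four resident blocks per SM on a 68-SM GPU) loops over all output tiles;
// wmma_grid_dim_x below is passed to the kernel as the logical grid size.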
wmma_grid.x = 68 * 4;
wmma_block.x = THREADS_PER_BLOCK;
int wmma_grid_dim_x = (M_TILES * N_TILES) / (BLOCK_COL_TILES * BLOCK_ROW_TILES);
int wmma_block_dim_x = wmma_block.x;
printf("[ORI] Running with tzgemm...\n");
printf("[ORI] wmma_grid -- %d * 1 wmma_block -- %d * 1 \n", wmma_grid.x, wmma_block.x);
hipLaunchKernelGGL(( checkKernelErrors((pers_tzgemm), dim3(wmma_grid), dim3(wmma_block), 0, 0, ori_wmma_A, ori_wmma_B, ori_wmma_C,
64, 64, 64,
4, wmma_block_dim_x, 1)));
cudaErrCheck(hipMemset(ori_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(hipEventRecord(startKERNEL));
for(int i = 0; i < wmma_iter; i++) {
hipLaunchKernelGGL(( checkKernelErrors((pers_tzgemm), dim3(wmma_grid), dim3(wmma_block), 0, 0, ori_wmma_A, ori_wmma_B, ori_wmma_C,
M_GLOBAL, N_GLOBAL, K_GLOBAL,
wmma_grid_dim_x, wmma_block_dim_x, 1)));
}
cudaErrCheck(hipEventRecord(stopKERNEL));
cudaErrCheck(hipEventSynchronize(stopKERNEL));
cudaErrCheck(hipEventElapsedTime(&kernel_time, startKERNEL, stopKERNEL));
printf("[ORI] tzgemm took %f us\n", kernel_time * 1000 / wmma_iter);
hipblasHandle_t cublasHandle;
cublasErrCheck(hipblasCreate(&cublasHandle));
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
printf("Running with cuBLAS...\n");
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
64, 64, 64,
&alpha_g,
mix_wmma_B, HIP_R_16F, 64,
mix_wmma_A, HIP_R_16F, 64,
&beta_g,
mix_wmma_C, HIP_R_32F, 64,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
cudaErrCheck(hipMemset(mix_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(hipEventRecord(startKERNEL));
for(int i = 0; i < wmma_iter; i++) {
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
N_GLOBAL, M_GLOBAL, K_GLOBAL,
&alpha_g,
mix_wmma_B, HIP_R_16F, K_GLOBAL,
mix_wmma_A, HIP_R_16F, K_GLOBAL,
&beta_g,
mix_wmma_C, HIP_R_32F, N_GLOBAL,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(hipEventRecord(stopKERNEL));
cudaErrCheck(hipEventSynchronize(stopKERNEL));
cudaErrCheck(hipEventElapsedTime(&kernel_time, startKERNEL, stopKERNEL));
printf("[ORI] cublas took %f us\n", kernel_time * 1000 /wmma_iter);
printf("Checking results...\n");
cudaErrCheck(hipMemcpy(ori_result_C, ori_wmma_C, sizeof(float) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(mix_result_C, mix_wmma_C, sizeof(float) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
int errors = 0;
for (int i = 0; i < M_GLOBAL * N_GLOBAL; i++) {
float v1 = ori_result_C[i];
float v2 = mix_result_C[i];
if (fabs(v1 - v2) > 0.00001f) {
errors++;
if (errors < 5) printf("%f %f\n", v1, v2);
}
// if (i < 10) printf("%f %f\n", ori_result_C[i], mix_result_C[i]);
}
if (errors > 0) {
printf("[WMMA] ORIGIN VERSION does not agree with MY VERSION! %d errors!\n", errors);
}
else {
printf("[WMMA] Results verified: ORIGIN VERSION and MY VERSION agree.\n");
}
cudaErrCheck(hipEventDestroy(startKERNEL));
cudaErrCheck(hipEventDestroy(stopKERNEL));
cudaErrCheck(hipDeviceReset());
return 0;
}
| 8b5872792a45bfad806fe8e5f5a5e4c7500b77ab.cu | #include <assert.h>
#include <stdio.h>
#include <curand.h>
#include <cublas_v2.h>
#include <cuda.h>
#include <mma.h>
using namespace nvcuda;
#include "tzgemm_mix.cu"
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#define checkKernelErrors(expr) \
do \
{ \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) \
{ \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
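// Benchmark driver: runs the persistent-kernel tzgemm and a GemmEx reference from the
// vendor BLAS library on the same half-precision inputs, times both with events, and
// compares the single-precision outputs element by element.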
int main(int argc, char* argv[]) {
int wmma_iter = 1;
int M_INPUT = 16 * 32 * 8;
int N_INPUT = 16 * 4 * 48;
int K_INPUT = 16 * 4 * 12;
if (argc == 5) {
wmma_iter = atoi(argv[1]);
M_INPUT = atoi(argv[2]);
N_INPUT = atoi(argv[3]);
K_INPUT = atoi(argv[4]);
}
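// Clamp the problem sizes to multiples of 64 (rounding down, with a minimum of 64),
// presumably so the matrices tile evenly into the WMMA fragments and block tiles
// used by the kernel.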
int M_GLOBAL = (M_INPUT < 64) ? 64 : (M_INPUT / 64) * 64;
int N_GLOBAL = (N_INPUT < 64) ? 64 : (N_INPUT / 64) * 64;
int K_GLOBAL = (K_INPUT < 64) ? 64 : (K_INPUT / 64) * 64;
int M_TILES = M_GLOBAL / WMMA_M;
int N_TILES = N_GLOBAL / WMMA_N;
int K_TILES = K_GLOBAL / WMMA_K;
float kernel_time;
curandGenerator_t gen;
cudaEvent_t startKERNEL;
cudaEvent_t stopKERNEL;
cudaErrCheck(cudaEventCreate(&startKERNEL));
cudaErrCheck(cudaEventCreate(&stopKERNEL));
// wmma variables
// ----------------------------------------------------------------------------------------------------------------------
float *ori_host_A = NULL;
float *ori_host_B = NULL;
float *ori_result_C = NULL;
float *mix_result_C = NULL;
half *ori_wmma_A = NULL;
half *ori_wmma_B = NULL;
float *ori_wmma_C = NULL;
half *mix_wmma_A = NULL;
half *mix_wmma_B = NULL;
float *mix_wmma_C = NULL;
// ori_host_A = (half *)malloc(sizeof(half) * M_GLOBAL * K_GLOBAL);
// ori_host_B = (half *)malloc(sizeof(half) * K_GLOBAL * N_GLOBAL);
ori_result_C = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
mix_result_C = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
// init_host_matrices(ori_host_A, ori_host_B);
curandErrCheck(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(curandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&ori_wmma_A), sizeof(half) * M_GLOBAL * K_GLOBAL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&ori_wmma_B), sizeof(half) * N_GLOBAL * K_GLOBAL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&ori_wmma_C), sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&mix_wmma_A), sizeof(half) * M_GLOBAL * K_GLOBAL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&mix_wmma_B), sizeof(half) * N_GLOBAL * K_GLOBAL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&mix_wmma_C), sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&ori_host_A), sizeof(float) * M_GLOBAL * K_GLOBAL));
cudaErrCheck(cudaMalloc(reinterpret_cast<void **>(&ori_host_B), sizeof(float) * N_GLOBAL * K_GLOBAL));
curandErrCheck(curandGenerateUniform(gen, ori_host_A, M_GLOBAL * K_GLOBAL));
curandErrCheck(curandGenerateUniform(gen, ori_host_B, N_GLOBAL * K_GLOBAL));
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
convertFp32ToFp16 <<< (M_GLOBAL * K_GLOBAL + 255) / 256, 256 >>> (ori_wmma_A, ori_host_A, M_GLOBAL * K_GLOBAL);
convertFp32ToFp16 <<< (N_GLOBAL * K_GLOBAL + 255) / 256, 256 >>> (ori_wmma_B, ori_host_B, N_GLOBAL * K_GLOBAL);
cudaErrCheck(cudaMemcpy(mix_wmma_A, ori_wmma_A, sizeof(half) * M_GLOBAL * K_GLOBAL, cudaMemcpyDeviceToDevice));
cudaErrCheck(cudaMemcpy(mix_wmma_B, ori_wmma_B, sizeof(half) * N_GLOBAL * K_GLOBAL, cudaMemcpyDeviceToDevice));
assert(((unsigned long long)ori_wmma_A) % 128 == 0);
assert(((unsigned long long)ori_wmma_B) % 128 == 0);
assert(((unsigned long long)ori_wmma_C) % 128 == 0);
assert(((unsigned long long)mix_wmma_A) % 128 == 0);
assert(((unsigned long long)mix_wmma_B) % 128 == 0);
assert(((unsigned long long)mix_wmma_C) % 128 == 0);
// cudaErrCheck(cudaMemcpy(ori_wmma_A, ori_host_A, sizeof(half) * M_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
// cudaErrCheck(cudaMemcpy(mix_wmma_A, ori_host_A, sizeof(half) * M_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
// cudaErrCheck(cudaMemcpy(ori_wmma_B, ori_host_B, sizeof(half) * N_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
// cudaErrCheck(cudaMemcpy(mix_wmma_B, ori_host_B, sizeof(half) * N_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemset(ori_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(cudaMemset(mix_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
// start solo running
// ----------------------------------------------------------------------------------------------------------------------
dim3 wmma_grid;
dim3 wmma_block;
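// Persistent-kernel launch configuration: a fixed grid of 68*4 blocks (presumably
// four resident blocks per SM on a 68-SM GPU) loops over all output tiles;
// wmma_grid_dim_x below is passed to the kernel as the logical grid size.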
wmma_grid.x = 68 * 4;
wmma_block.x = THREADS_PER_BLOCK;
int wmma_grid_dim_x = (M_TILES * N_TILES) / (BLOCK_COL_TILES * BLOCK_ROW_TILES);
int wmma_block_dim_x = wmma_block.x;
printf("[ORI] Running with tzgemm...\n");
printf("[ORI] wmma_grid -- %d * 1 wmma_block -- %d * 1 \n", wmma_grid.x, wmma_block.x);
checkKernelErrors((pers_tzgemm<<<wmma_grid, wmma_block>>>(ori_wmma_A, ori_wmma_B, ori_wmma_C,
64, 64, 64,
4, wmma_block_dim_x, 1)));
cudaErrCheck(cudaMemset(ori_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(cudaEventRecord(startKERNEL));
for(int i = 0; i < wmma_iter; i++) {
checkKernelErrors((pers_tzgemm<<<wmma_grid, wmma_block>>>(ori_wmma_A, ori_wmma_B, ori_wmma_C,
M_GLOBAL, N_GLOBAL, K_GLOBAL,
wmma_grid_dim_x, wmma_block_dim_x, 1)));
}
cudaErrCheck(cudaEventRecord(stopKERNEL));
cudaErrCheck(cudaEventSynchronize(stopKERNEL));
cudaErrCheck(cudaEventElapsedTime(&kernel_time, startKERNEL, stopKERNEL));
printf("[ORI] tzgemm took %f us\n", kernel_time * 1000 / wmma_iter);
cublasHandle_t cublasHandle;
cublasErrCheck(cublasCreate(&cublasHandle));
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
printf("Running with cuBLAS...\n");
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
64, 64, 64,
&alpha_g,
mix_wmma_B, CUDA_R_16F, 64,
mix_wmma_A, CUDA_R_16F, 64,
&beta_g,
mix_wmma_C, CUDA_R_32F, 64,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
cudaErrCheck(cudaMemset(mix_wmma_C, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
cudaErrCheck(cudaEventRecord(startKERNEL));
for(int i = 0; i < wmma_iter; i++) {
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
N_GLOBAL, M_GLOBAL, K_GLOBAL,
&alpha_g,
mix_wmma_B, CUDA_R_16F, K_GLOBAL,
mix_wmma_A, CUDA_R_16F, K_GLOBAL,
&beta_g,
mix_wmma_C, CUDA_R_32F, N_GLOBAL,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(cudaEventRecord(stopKERNEL));
cudaErrCheck(cudaEventSynchronize(stopKERNEL));
cudaErrCheck(cudaEventElapsedTime(&kernel_time, startKERNEL, stopKERNEL));
printf("[ORI] cublas took %f us\n", kernel_time * 1000 /wmma_iter);
printf("Checking results...\n");
cudaErrCheck(cudaMemcpy(ori_result_C, ori_wmma_C, sizeof(float) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(mix_result_C, mix_wmma_C, sizeof(float) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
int errors = 0;
for (int i = 0; i < M_GLOBAL * N_GLOBAL; i++) {
float v1 = ori_result_C[i];
float v2 = mix_result_C[i];
if (fabs(v1 - v2) > 0.00001f) {
errors++;
if (errors < 5) printf("%f %f\n", v1, v2);
}
// if (i < 10) printf("%f %f\n", ori_result_C[i], mix_result_C[i]);
}
if (errors > 0) {
printf("[WMMA] ORIGIN VERSION does not agree with MY VERSION! %d errors!\n", errors);
}
else {
printf("[WMMA] Results verified: ORIGIN VERSION and MY VERSION agree.\n");
}
cudaErrCheck(cudaEventDestroy(startKERNEL));
cudaErrCheck(cudaEventDestroy(stopKERNEL));
cudaErrCheck(cudaDeviceReset());
return 0;
}
|
1daf5b55cb1d351376fa7f17b33674aacedfb635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Fermat
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "eaw.h"
#include <cugar/basic/cuda/arch.h>
#include <cugar/linalg/vector.h>
FERMAT_HOST_DEVICE
float norm_diff(const cugar::Vector3f a, const cugar::Vector3f b)
{
const float d = cugar::max(1e-8f, cugar::dot(a, b));
return 1.0f - d;
//return sqrtf(cugar::max(0.0f, 1.0f - d * d)) / d;
}
/// perform a step of Edge A-trous Wavelet filtering
///
__global__
void EAW_kernel(FBufferChannelView dst, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, const uint32 step_size)
{
const uint32 x = threadIdx.x + blockIdx.x * blockDim.x;
const uint32 y = threadIdx.y + blockIdx.y * blockDim.y;
// check whether this pixel is in range
if (x >= dst.res_x ||
y >= dst.res_y)
return;
const float kernelWeights[3] = { 1.0, 2.0 / 3.0, 1.0 / 6.0 };
const cugar::Vector4f colorCenter = img(x, y);
const cugar::Vector4f packed_geo = gb.geo(x, y);
const cugar::Vector3f normalCenter = GBufferView::unpack_normal(packed_geo);
const cugar::Vector3f positionCenter = GBufferView::unpack_pos(packed_geo);
// check whether this pixel represents a miss (TODO: for stochastic effects, we might want to do some filtering in this case too...)
if (GBufferView::is_miss(packed_geo))
{
dst(x, y) = colorCenter;
return;
}
const float variance = var ? var[x + y * img.res_x] : 1.0f;
const float phiNormal = params.phi_normal * step_size * step_size;
const float phiPosition = params.phi_position;
const float phiColor = params.phi_color / cugar::max(1.0e-3f, variance );
float sumWeight = 0.0;
cugar::Vector3f sumColor = cugar::Vector3f(0.0f);
for (int yy = -2; yy <= 2; yy++)
{
for (int xx = -2; xx <= 2; xx++)
{
const int2 p = make_int2( x + xx * step_size, y + yy * step_size );
const bool inside =
/*__all*/(p.x >= 0 && p.y >= 0) &&
/*__all*/(p.x < img.res_x && p.y < img.res_y);
const float kernel = kernelWeights[cugar::abs(xx)] * kernelWeights[cugar::abs(yy)];
if (inside)
{
const cugar::Vector4f colorP = img(p);
const cugar::Vector4f geoP = gb.geo(p);
const cugar::Vector3f normalP = GBufferView::unpack_normal(geoP);
const cugar::Vector3f positionP = GBufferView::unpack_pos(geoP);
if (GBufferView::is_miss(geoP) == false)
{
// compute the color weight
cugar::Vector3f diffCol = colorP.xyz() - colorCenter.xyz();
const float wColor = cugar::dot(diffCol, diffCol) * phiColor;
// compute the normal weight
const float wNormal = norm_diff(normalP, normalCenter) * phiNormal;
// compute the positional weight
cugar::Vector3f diffPosition = (positionP - positionCenter);
const float wPosition = dot(diffPosition, diffPosition) * phiPosition;
const float w = kernel * expf(0.0
- cugar::max(wPosition, 0.0f)
- cugar::max(wNormal, 0.0f)
- cugar::max(wColor, 0.0f)
);
sumWeight += w;
sumColor += w * colorP.xyz();
}
}
}
}
dst(x, y) = sumWeight ? cugar::Vector4f(sumColor / sumWeight, colorCenter.w) : colorCenter;
}
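// The per-tap weight above combines the 5-tap A-trous kernel weights { 1, 2/3, 1/6 } with
// three edge-stopping terms on position, normal and color. A minimal scalar sketch of that
// weight, with the cugar vector math stripped out (hypothetical helper, not used by the
// kernels in this file; assumes the math support already pulled in by the includes above):
FERMAT_HOST_DEVICE inline float eaw_weight_sketch(
    const float kernel,        // kernelWeights[|xx|] * kernelWeights[|yy|]
    const float pos_dist2,     // |positionP - positionCenter|^2
    const float normal_diff,   // norm_diff(normalP, normalCenter)
    const float color_dist2,   // |colorP - colorCenter|^2
    const float phi_position,
    const float phi_normal,
    const float phi_color)
{
    const float w_position = pos_dist2   * phi_position;
    const float w_normal   = normal_diff * phi_normal;
    const float w_color    = color_dist2 * phi_color;
    return kernel * expf(0.0f
        - cugar::max(w_position, 0.0f)
        - cugar::max(w_normal,   0.0f)
        - cugar::max(w_color,    0.0f));
}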
/// perform a step of Edge A-trous Wavelet filtering
///
__global__
void EAW_mad_kernel(
FBufferChannelView dst,
const uint32 op,
const FBufferChannelView w_img,
const float w_min,
const FBufferChannelView img,
const GBufferView gb,
const float* var,
const EAWParams params,
const uint32 step_size)
{
const uint32 x = threadIdx.x + blockIdx.x * blockDim.x;
const uint32 y = threadIdx.y + blockIdx.y * blockDim.y;
// check whether this pixel is in range
if (x >= dst.res_x ||
y >= dst.res_y)
return;
const float kernelWeights[3] = { 1.0, 2.0 / 3.0, 1.0 / 6.0 };
const cugar::Vector4f weightCenter = cugar::max( cugar::Vector4f(w_img(x, y)), w_min );
const cugar::Vector4f imgCenter = img(x, y);
const cugar::Vector4f colorCenter =
(op & kEAWOpModulateInput) ? imgCenter * weightCenter :
(op & kEAWOpDemodulateInput) ? imgCenter / weightCenter :
imgCenter;
const cugar::Vector4f packed_geo = gb.geo(x, y);
const cugar::Vector3f normalCenter = GBufferView::unpack_normal(packed_geo);
const cugar::Vector3f positionCenter = GBufferView::unpack_pos(packed_geo);
// check whether this pixel represents a miss (TODO: for stochastic effects, we might want to do some filtering in this case too...)
if (GBufferView::is_miss(packed_geo))
{
cugar::Vector4f r = (op & kEAWOpAddMode) ? dst(x, y) : cugar::Vector4f(0.0f);
r +=
(op & kEAWOpModulateOutput) ? colorCenter * weightCenter :
(op & kEAWOpDemodulateOutput) ? colorCenter / weightCenter :
colorCenter;
dst(x, y) = r;
return;
}
const float variance = var ? var[x + y * img.res_x] : 1.0f;
const float phiNormal = params.phi_normal * step_size * step_size;
const float phiPosition = params.phi_position;
const float phiColor = params.phi_color / cugar::max(1.0e-3f, variance );
float sumWeight = 0.0;
cugar::Vector3f sumColor = cugar::Vector3f(0.0f);
for (int yy = -2; yy <= 2; yy++)
{
for (int xx = -2; xx <= 2; xx++)
{
const int2 p = make_int2(x + xx * step_size, y + yy * step_size);
const bool inside =
/*__all*/(p.x >= 0 && p.y >= 0) &&
/*__all*/(p.x < img.res_x && p.y < img.res_y);
const float kernel = kernelWeights[cugar::abs(xx)] * kernelWeights[cugar::abs(yy)];
if (inside)
{
const cugar::Vector4f weightP = cugar::max( cugar::Vector4f(w_img(p)), w_min );
const cugar::Vector4f imgP = img(p);
const cugar::Vector4f colorP =
(op & kEAWOpModulateInput) ? imgP * weightP :
(op & kEAWOpDemodulateInput) ? imgP / weightP :
imgP;
const cugar::Vector4f geoP = gb.geo(p);
const cugar::Vector3f normalP = GBufferView::unpack_normal(geoP);
const cugar::Vector3f positionP = GBufferView::unpack_pos(geoP);
if (GBufferView::is_miss(geoP) == false)
{
// compute the color weight
cugar::Vector3f diffCol = colorP.xyz() - colorCenter.xyz();
const float wColor = cugar::dot(diffCol, diffCol) * phiColor;
// compute the normal weight
const float wNormal = norm_diff(normalP, normalCenter) * phiNormal;
// compute the positional weight
cugar::Vector3f diffPosition = (positionP - positionCenter);
const float wPosition = dot(diffPosition, diffPosition) * phiPosition;
const float w = kernel * expf(0.0
- cugar::max(wPosition, 0.0f)
- cugar::max(wNormal, 0.0f)
- cugar::max(wColor, 0.0f)
);
sumWeight += w;
sumColor += w * colorP.xyz();
}
}
}
}
cugar::Vector4f r = (op & kEAWOpAddMode) ? dst(x, y) : cugar::Vector4f(0.0f);
cugar::Vector4f c = (sumWeight ? cugar::Vector4f(sumColor / sumWeight, colorCenter.w) : colorCenter);
r +=
(op & kEAWOpModulateOutput) ? c * weightCenter :
(op & kEAWOpDemodulateOutput) ? c / weightCenter :
c;
dst(x, y) = r;
}
// perform a step of Edge A-trous Wavelet filtering
//
void EAW(FBufferChannelView dst, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, const uint32 step_size)
{
dim3 blockSize(32, 4);
dim3 gridSize(cugar::divide_ri(dst.res_x, blockSize.x), cugar::divide_ri(dst.res_y, blockSize.y));
EAW_kernel << < gridSize, blockSize >> > (dst, img, gb, var, params, step_size);
CUDA_CHECK(cugar::cuda::sync_and_check_error("EAW_kernel"));
}
// perform a step of Edge A-trous Wavelet filtering, multiplying the result by a weight and adding it to the output, i.e. solving:
//
// dst += w_img * eaw(img)
//
void EAW(FBufferChannelView dst, const EAWOp op, const FBufferChannelView w_img, const float w_min, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, const uint32 step_size)
{
dim3 blockSize(32, 4);
dim3 gridSize(cugar::divide_ri(dst.res_x, blockSize.x), cugar::divide_ri(dst.res_y, blockSize.y));
EAW_mad_kernel<< < gridSize, blockSize >> > (dst, op, w_img, w_min, img, gb, var, params, step_size);
CUDA_CHECK(cugar::cuda::sync_and_check_error("EAW_mad_kernel"));
}
// perform several iterations of Edge A-trous Wavelet filtering
//
void EAW(const uint32 n_iterations, uint32& in_buffer, FBufferChannelView pingpong[2], const GBufferView gb, const float* var, const EAWParams params)
{
for (uint32 i = 0; i < n_iterations; ++i)
{
const uint32 out_buffer = in_buffer ? 0 : 1;
EAW(
pingpong[out_buffer],
pingpong[in_buffer],
gb,
var,
params,
1u << i);
in_buffer = out_buffer;
}
}
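// The driver above doubles the tap spacing every pass (step_size = 1u << i), so after n
// passes the 5-tap kernel can gather from pixels up to 2 * (2^n - 1) away along each axis.
// A minimal sketch of that reach computation (hypothetical helper, not used by the code here):
inline uint32 atrous_reach_sketch(const uint32 n_iterations)
{
    // pass i reaches 2 * 2^i pixels; summing over i = 0..n-1 gives 2 * (2^n - 1)
    return 2u * ((1u << n_iterations) - 1u);
}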
// perform several iterations of Edge A-trous Wavelet filtering
//
void EAW(const uint32 n_iterations, FBufferChannelView dst, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, FBufferChannelView pingpong[2])
{
uint32 in_buffer = 0;
for (uint32 i = 0; i < n_iterations; ++i)
{
const uint32 out_buffer = in_buffer ? 0 : 1;
EAW(
i == n_iterations - 1 ? dst : pingpong[out_buffer],
i == 0 ? img : pingpong[in_buffer],
gb,
var,
params,
1u << i);
in_buffer = out_buffer;
}
}
// perform several iterations of Edge A-trous Wavelet filtering
//
void EAW(const uint32 n_iterations, FBufferChannelView dst, const FBufferChannelView w_img, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, FBufferChannelView pingpong[2])
{
uint32 in_buffer = 0;
for (uint32 i = 0; i < n_iterations; ++i)
{
const uint32 out_buffer = in_buffer ? 0 : 1;
if (i == n_iterations - 1)
{
EAW(
dst,
EAWOp(kEAWOpModulateOutput | kEAWOpAddMode),
w_img,
1.0e-4f,
i == 0 ? img : pingpong[in_buffer],
gb,
var,
params,
1u << i);
}
else if (i == 0)
{
EAW(
pingpong[out_buffer],
EAWOp(kEAWOpDemodulateInput | kEAWOpReplaceMode),
w_img,
1.0e-4f,
img,
gb,
var,
params,
1u << i);
}
else
{
EAW(
pingpong[out_buffer],
pingpong[in_buffer],
gb,
var,
params,
1u << i);
}
in_buffer = out_buffer;
}
}
| 1daf5b55cb1d351376fa7f17b33674aacedfb635.cu | /*
* Fermat
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "eaw.h"
#include <cugar/basic/cuda/arch.h>
#include <cugar/linalg/vector.h>
FERMAT_HOST_DEVICE
float norm_diff(const cugar::Vector3f a, const cugar::Vector3f b)
{
const float d = cugar::max(1e-8f, cugar::dot(a, b));
return 1.0f - d;
//return sqrtf(cugar::max(0.0f, 1.0f - d * d)) / d;
}
/// perform a step of Edge A-trous Wavelet filtering
///
__global__
void EAW_kernel(FBufferChannelView dst, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, const uint32 step_size)
{
const uint32 x = threadIdx.x + blockIdx.x * blockDim.x;
const uint32 y = threadIdx.y + blockIdx.y * blockDim.y;
// check whether this pixel is in range
if (x >= dst.res_x ||
y >= dst.res_y)
return;
const float kernelWeights[3] = { 1.0, 2.0 / 3.0, 1.0 / 6.0 };
const cugar::Vector4f colorCenter = img(x, y);
const cugar::Vector4f packed_geo = gb.geo(x, y);
const cugar::Vector3f normalCenter = GBufferView::unpack_normal(packed_geo);
const cugar::Vector3f positionCenter = GBufferView::unpack_pos(packed_geo);
// check whether this pixel represents a miss (TODO: for stochastic effects, we might want to do some filtering in this case too...)
if (GBufferView::is_miss(packed_geo))
{
dst(x, y) = colorCenter;
return;
}
const float variance = var ? var[x + y * img.res_x] : 1.0f;
const float phiNormal = params.phi_normal * step_size * step_size;
const float phiPosition = params.phi_position;
const float phiColor = params.phi_color / cugar::max(1.0e-3f, variance );
float sumWeight = 0.0;
cugar::Vector3f sumColor = cugar::Vector3f(0.0f);
for (int yy = -2; yy <= 2; yy++)
{
for (int xx = -2; xx <= 2; xx++)
{
const int2 p = make_int2( x + xx * step_size, y + yy * step_size );
const bool inside =
/*__all*/(p.x >= 0 && p.y >= 0) &&
/*__all*/(p.x < img.res_x && p.y < img.res_y);
const float kernel = kernelWeights[cugar::abs(xx)] * kernelWeights[cugar::abs(yy)];
if (inside)
{
const cugar::Vector4f colorP = img(p);
const cugar::Vector4f geoP = gb.geo(p);
const cugar::Vector3f normalP = GBufferView::unpack_normal(geoP);
const cugar::Vector3f positionP = GBufferView::unpack_pos(geoP);
if (GBufferView::is_miss(geoP) == false)
{
// compute the color weight
cugar::Vector3f diffCol = colorP.xyz() - colorCenter.xyz();
const float wColor = cugar::dot(diffCol, diffCol) * phiColor;
// compute the normal weight
const float wNormal = norm_diff(normalP, normalCenter) * phiNormal;
// compute the positional weight
cugar::Vector3f diffPosition = (positionP - positionCenter);
const float wPosition = dot(diffPosition, diffPosition) * phiPosition;
const float w = kernel * expf(0.0
- cugar::max(wPosition, 0.0f)
- cugar::max(wNormal, 0.0f)
- cugar::max(wColor, 0.0f)
);
sumWeight += w;
sumColor += w * colorP.xyz();
}
}
}
}
dst(x, y) = sumWeight ? cugar::Vector4f(sumColor / sumWeight, colorCenter.w) : colorCenter;
}
/// perform a step of Edge A-trous Wavelet filtering
///
__global__
void EAW_mad_kernel(
FBufferChannelView dst,
const uint32 op,
const FBufferChannelView w_img,
const float w_min,
const FBufferChannelView img,
const GBufferView gb,
const float* var,
const EAWParams params,
const uint32 step_size)
{
const uint32 x = threadIdx.x + blockIdx.x * blockDim.x;
const uint32 y = threadIdx.y + blockIdx.y * blockDim.y;
// check whether this pixel is in range
if (x >= dst.res_x ||
y >= dst.res_y)
return;
const float kernelWeights[3] = { 1.0, 2.0 / 3.0, 1.0 / 6.0 };
const cugar::Vector4f weightCenter = cugar::max( cugar::Vector4f(w_img(x, y)), w_min );
const cugar::Vector4f imgCenter = img(x, y);
const cugar::Vector4f colorCenter =
(op & kEAWOpModulateInput) ? imgCenter * weightCenter :
(op & kEAWOpDemodulateInput) ? imgCenter / weightCenter :
imgCenter;
const cugar::Vector4f packed_geo = gb.geo(x, y);
const cugar::Vector3f normalCenter = GBufferView::unpack_normal(packed_geo);
const cugar::Vector3f positionCenter = GBufferView::unpack_pos(packed_geo);
// check whether this pixel represents a miss (TODO: for stochastic effects, we might want to do some filtering in this case too...)
if (GBufferView::is_miss(packed_geo))
{
cugar::Vector4f r = (op & kEAWOpAddMode) ? dst(x, y) : cugar::Vector4f(0.0f);
r +=
(op & kEAWOpModulateOutput) ? colorCenter * weightCenter :
(op & kEAWOpDemodulateOutput) ? colorCenter / weightCenter :
colorCenter;
dst(x, y) = r;
return;
}
const float variance = var ? var[x + y * img.res_x] : 1.0f;
const float phiNormal = params.phi_normal * step_size * step_size;
const float phiPosition = params.phi_position;
const float phiColor = params.phi_color / cugar::max(1.0e-3f, variance );
float sumWeight = 0.0;
cugar::Vector3f sumColor = cugar::Vector3f(0.0f);
for (int yy = -2; yy <= 2; yy++)
{
for (int xx = -2; xx <= 2; xx++)
{
const int2 p = make_int2(x + xx * step_size, y + yy * step_size);
const bool inside =
/*__all*/(p.x >= 0 && p.y >= 0) &&
/*__all*/(p.x < img.res_x && p.y < img.res_y);
const float kernel = kernelWeights[cugar::abs(xx)] * kernelWeights[cugar::abs(yy)];
if (inside)
{
const cugar::Vector4f weightP = cugar::max( cugar::Vector4f(w_img(p)), w_min );
const cugar::Vector4f imgP = img(p);
const cugar::Vector4f colorP =
(op & kEAWOpModulateInput) ? imgP * weightP :
(op & kEAWOpDemodulateInput) ? imgP / weightP :
imgP;
const cugar::Vector4f geoP = gb.geo(p);
const cugar::Vector3f normalP = GBufferView::unpack_normal(geoP);
const cugar::Vector3f positionP = GBufferView::unpack_pos(geoP);
if (GBufferView::is_miss(geoP) == false)
{
// compute the color weight
cugar::Vector3f diffCol = colorP.xyz() - colorCenter.xyz();
const float wColor = cugar::dot(diffCol, diffCol) * phiColor;
// compute the normal weight
const float wNormal = norm_diff(normalP, normalCenter) * phiNormal;
// compute the positional weight
cugar::Vector3f diffPosition = (positionP - positionCenter);
const float wPosition = dot(diffPosition, diffPosition) * phiPosition;
const float w = kernel * expf(0.0
- cugar::max(wPosition, 0.0f)
- cugar::max(wNormal, 0.0f)
- cugar::max(wColor, 0.0f)
);
sumWeight += w;
sumColor += w * colorP.xyz();
}
}
}
}
cugar::Vector4f r = (op & kEAWOpAddMode) ? dst(x, y) : cugar::Vector4f(0.0f);
cugar::Vector4f c = (sumWeight ? cugar::Vector4f(sumColor / sumWeight, colorCenter.w) : colorCenter);
r +=
(op & kEAWOpModulateOutput) ? c * weightCenter :
(op & kEAWOpDemodulateOutput) ? c / weightCenter :
c;
dst(x, y) = r;
}
// perform a step of Edge A-trous Wavelet filtering
//
void EAW(FBufferChannelView dst, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, const uint32 step_size)
{
dim3 blockSize(32, 4);
dim3 gridSize(cugar::divide_ri(dst.res_x, blockSize.x), cugar::divide_ri(dst.res_y, blockSize.y));
EAW_kernel << < gridSize, blockSize >> > (dst, img, gb, var, params, step_size);
CUDA_CHECK(cugar::cuda::sync_and_check_error("EAW_kernel"));
}
// perform a step of Edge A-trous Wavelet filtering, multiplying the result by a weight and adding it to the output, i.e. solving:
//
// dst += w_img * eaw(img)
//
void EAW(FBufferChannelView dst, const EAWOp op, const FBufferChannelView w_img, const float w_min, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, const uint32 step_size)
{
dim3 blockSize(32, 4);
dim3 gridSize(cugar::divide_ri(dst.res_x, blockSize.x), cugar::divide_ri(dst.res_y, blockSize.y));
EAW_mad_kernel<< < gridSize, blockSize >> > (dst, op, w_img, w_min, img, gb, var, params, step_size);
CUDA_CHECK(cugar::cuda::sync_and_check_error("EAW_mad_kernel"));
}
// perform several iterations of Edge A-trous Wavelet filtering
//
void EAW(const uint32 n_iterations, uint32& in_buffer, FBufferChannelView pingpong[2], const GBufferView gb, const float* var, const EAWParams params)
{
for (uint32 i = 0; i < n_iterations; ++i)
{
const uint32 out_buffer = in_buffer ? 0 : 1;
EAW(
pingpong[out_buffer],
pingpong[in_buffer],
gb,
var,
params,
1u << i);
in_buffer = out_buffer;
}
}
// perform several iterations of Edge A-trous Wavelet filtering
//
void EAW(const uint32 n_iterations, FBufferChannelView dst, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, FBufferChannelView pingpong[2])
{
uint32 in_buffer = 0;
for (uint32 i = 0; i < n_iterations; ++i)
{
const uint32 out_buffer = in_buffer ? 0 : 1;
EAW(
i == n_iterations - 1 ? dst : pingpong[out_buffer],
i == 0 ? img : pingpong[in_buffer],
gb,
var,
params,
1u << i);
in_buffer = out_buffer;
}
}
// perform several iterations of Edge A-trous Wavelet filtering
//
void EAW(const uint32 n_iterations, FBufferChannelView dst, const FBufferChannelView w_img, const FBufferChannelView img, const GBufferView gb, const float* var, const EAWParams params, FBufferChannelView pingpong[2])
{
uint32 in_buffer = 0;
for (uint32 i = 0; i < n_iterations; ++i)
{
const uint32 out_buffer = in_buffer ? 0 : 1;
if (i == n_iterations - 1)
{
EAW(
dst,
EAWOp(kEAWOpModulateOutput | kEAWOpAddMode),
w_img,
1.0e-4f,
i == 0 ? img : pingpong[in_buffer],
gb,
var,
params,
1u << i);
}
else if (i == 0)
{
EAW(
pingpong[out_buffer],
EAWOp(kEAWOpDemodulateInput | kEAWOpReplaceMode),
w_img,
1.0e-4f,
img,
gb,
var,
params,
1u << i);
}
else
{
EAW(
pingpong[out_buffer],
pingpong[in_buffer],
gb,
var,
params,
1u << i);
}
in_buffer = out_buffer;
}
}
|
68c7cb102a3db2d8c2c3777c362026bd123a857b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
namespace cv { namespace gpu { namespace device
{
namespace optical_flow
{
#define NEEDLE_MAP_SCALE 16
#define NUM_VERTS_PER_ARROW 6
__global__ void NeedleMapAverageKernel(const PtrStepSzf u, const PtrStepf v, PtrStepf u_avg, PtrStepf v_avg)
{
__shared__ float smem[2 * NEEDLE_MAP_SCALE];
volatile float* u_col_sum = smem;
volatile float* v_col_sum = u_col_sum + NEEDLE_MAP_SCALE;
const int x = blockIdx.x * NEEDLE_MAP_SCALE + threadIdx.x;
const int y = blockIdx.y * NEEDLE_MAP_SCALE;
u_col_sum[threadIdx.x] = 0;
v_col_sum[threadIdx.x] = 0;
#pragma unroll
for(int i = 0; i < NEEDLE_MAP_SCALE; ++i)
{
u_col_sum[threadIdx.x] += u(::min(y + i, u.rows - 1), x);
v_col_sum[threadIdx.x] += v(::min(y + i, u.rows - 1), x);
}
if (threadIdx.x < 8)
{
// now add the column sums
const uint X = threadIdx.x;
if (X | 0xfe == 0xfe) // bit 0 is 0
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 1];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 1];
}
if (X | 0xfe == 0xfc) // bits 0 & 1 == 0
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 2];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 2];
}
if (X | 0xf8 == 0xf8)
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 4];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 4];
}
if (X == 0)
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 8];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 8];
}
}
if (threadIdx.x == 0)
{
const float coeff = 1.0f / (NEEDLE_MAP_SCALE * NEEDLE_MAP_SCALE);
u_col_sum[0] *= coeff;
v_col_sum[0] *= coeff;
u_avg(blockIdx.y, blockIdx.x) = u_col_sum[0];
v_avg(blockIdx.y, blockIdx.x) = v_col_sum[0];
}
}
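// Note on the reduction above: == binds tighter than |, so a test such as (X | 0xfe == 0xfe)
// parses as X | (0xfe == 0xfe) and does not check the bit mask the comment describes; the
// kernel also relies on its 16-thread block running in lock-step over volatile shared memory.
// A minimal sketch of the same 16-value column sum written with explicit strides and block
// synchronization (hypothetical helper, not used by the kernels in this file):
__device__ inline float needle_column_sum_sketch(float* col_sum)
{
    // assumes blockDim.x == NEEDLE_MAP_SCALE == 16 and col_sum points to shared memory
    for (int offset = NEEDLE_MAP_SCALE / 2; offset > 0; offset /= 2)
    {
        if (threadIdx.x < (unsigned int)offset)
            col_sum[threadIdx.x] += col_sum[threadIdx.x + offset];
        __syncthreads();
    }
    return col_sum[0];
}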
void NeedleMapAverage_gpu(PtrStepSzf u, PtrStepSzf v, PtrStepSzf u_avg, PtrStepSzf v_avg)
{
const dim3 block(NEEDLE_MAP_SCALE);
const dim3 grid(u_avg.cols, u_avg.rows);
hipLaunchKernelGGL(( NeedleMapAverageKernel), dim3(grid), dim3(block), 0, 0, u, v, u_avg, v_avg);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
__global__ void NeedleMapVertexKernel(const PtrStepSzf u_avg, const PtrStepf v_avg, float* vertex_data, float* color_data, float max_flow, float xscale, float yscale)
{
// test - just draw a triangle at each pixel
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const float arrow_x = x * NEEDLE_MAP_SCALE + NEEDLE_MAP_SCALE / 2.0f;
const float arrow_y = y * NEEDLE_MAP_SCALE + NEEDLE_MAP_SCALE / 2.0f;
float3 v[NUM_VERTS_PER_ARROW];
if (x < u_avg.cols && y < u_avg.rows)
{
const float u_avg_val = u_avg(y, x);
const float v_avg_val = v_avg(y, x);
const float theta = ::atan2f(v_avg_val, u_avg_val);// + CV_PI;
float r = ::sqrtf(v_avg_val * v_avg_val + u_avg_val * u_avg_val);
r = fmin(14.0f * (r / max_flow), 14.0f);
v[0].z = 1.0f;
v[1].z = 0.7f;
v[2].z = 0.7f;
v[3].z = 0.7f;
v[4].z = 0.7f;
v[5].z = 1.0f;
v[0].x = arrow_x;
v[0].y = arrow_y;
v[5].x = arrow_x;
v[5].y = arrow_y;
v[2].x = arrow_x + r * ::cosf(theta);
v[2].y = arrow_y + r * ::sinf(theta);
v[3].x = v[2].x;
v[3].y = v[2].y;
r = ::fmin(r, 2.5f);
v[1].x = arrow_x + r * ::cosf(theta - CV_PI_F / 2.0f);
v[1].y = arrow_y + r * ::sinf(theta - CV_PI_F / 2.0f);
v[4].x = arrow_x + r * ::cosf(theta + CV_PI_F / 2.0f);
v[4].y = arrow_y + r * ::sinf(theta + CV_PI_F / 2.0f);
int indx = (y * u_avg.cols + x) * NUM_VERTS_PER_ARROW * 3;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[0].x * xscale;
vertex_data[indx++] = v[0].y * yscale;
vertex_data[indx++] = v[0].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[1].x * xscale;
vertex_data[indx++] = v[1].y * yscale;
vertex_data[indx++] = v[1].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[2].x * xscale;
vertex_data[indx++] = v[2].y * yscale;
vertex_data[indx++] = v[2].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[3].x * xscale;
vertex_data[indx++] = v[3].y * yscale;
vertex_data[indx++] = v[3].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[4].x * xscale;
vertex_data[indx++] = v[4].y * yscale;
vertex_data[indx++] = v[4].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[5].x * xscale;
vertex_data[indx++] = v[5].y * yscale;
vertex_data[indx++] = v[5].z;
}
}
void CreateOpticalFlowNeedleMap_gpu(PtrStepSzf u_avg, PtrStepSzf v_avg, float* vertex_buffer, float* color_data, float max_flow, float xscale, float yscale)
{
const dim3 block(16);
const dim3 grid(divUp(u_avg.cols, block.x), divUp(u_avg.rows, block.y));
hipLaunchKernelGGL(( NeedleMapVertexKernel), dim3(grid), dim3(block), 0, 0, u_avg, v_avg, vertex_buffer, color_data, max_flow, xscale, yscale);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
}
}}}
#endif /* CUDA_DISABLER */
| 68c7cb102a3db2d8c2c3777c362026bd123a857b.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
namespace cv { namespace gpu { namespace device
{
namespace optical_flow
{
#define NEEDLE_MAP_SCALE 16
#define NUM_VERTS_PER_ARROW 6
__global__ void NeedleMapAverageKernel(const PtrStepSzf u, const PtrStepf v, PtrStepf u_avg, PtrStepf v_avg)
{
__shared__ float smem[2 * NEEDLE_MAP_SCALE];
volatile float* u_col_sum = smem;
volatile float* v_col_sum = u_col_sum + NEEDLE_MAP_SCALE;
const int x = blockIdx.x * NEEDLE_MAP_SCALE + threadIdx.x;
const int y = blockIdx.y * NEEDLE_MAP_SCALE;
u_col_sum[threadIdx.x] = 0;
v_col_sum[threadIdx.x] = 0;
#pragma unroll
for(int i = 0; i < NEEDLE_MAP_SCALE; ++i)
{
u_col_sum[threadIdx.x] += u(::min(y + i, u.rows - 1), x);
v_col_sum[threadIdx.x] += v(::min(y + i, u.rows - 1), x);
}
if (threadIdx.x < 8)
{
// now add the column sums
const uint X = threadIdx.x;
if (X | 0xfe == 0xfe) // bit 0 is 0
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 1];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 1];
}
if (X | 0xfe == 0xfc) // bits 0 & 1 == 0
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 2];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 2];
}
if (X | 0xf8 == 0xf8)
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 4];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 4];
}
if (X == 0)
{
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 8];
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 8];
}
}
if (threadIdx.x == 0)
{
const float coeff = 1.0f / (NEEDLE_MAP_SCALE * NEEDLE_MAP_SCALE);
u_col_sum[0] *= coeff;
v_col_sum[0] *= coeff;
u_avg(blockIdx.y, blockIdx.x) = u_col_sum[0];
v_avg(blockIdx.y, blockIdx.x) = v_col_sum[0];
}
}
void NeedleMapAverage_gpu(PtrStepSzf u, PtrStepSzf v, PtrStepSzf u_avg, PtrStepSzf v_avg)
{
const dim3 block(NEEDLE_MAP_SCALE);
const dim3 grid(u_avg.cols, u_avg.rows);
NeedleMapAverageKernel<<<grid, block>>>(u, v, u_avg, v_avg);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void NeedleMapVertexKernel(const PtrStepSzf u_avg, const PtrStepf v_avg, float* vertex_data, float* color_data, float max_flow, float xscale, float yscale)
{
// test - just draw a triangle at each pixel
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const float arrow_x = x * NEEDLE_MAP_SCALE + NEEDLE_MAP_SCALE / 2.0f;
const float arrow_y = y * NEEDLE_MAP_SCALE + NEEDLE_MAP_SCALE / 2.0f;
float3 v[NUM_VERTS_PER_ARROW];
if (x < u_avg.cols && y < u_avg.rows)
{
const float u_avg_val = u_avg(y, x);
const float v_avg_val = v_avg(y, x);
const float theta = ::atan2f(v_avg_val, u_avg_val);// + CV_PI;
float r = ::sqrtf(v_avg_val * v_avg_val + u_avg_val * u_avg_val);
r = fmin(14.0f * (r / max_flow), 14.0f);
v[0].z = 1.0f;
v[1].z = 0.7f;
v[2].z = 0.7f;
v[3].z = 0.7f;
v[4].z = 0.7f;
v[5].z = 1.0f;
v[0].x = arrow_x;
v[0].y = arrow_y;
v[5].x = arrow_x;
v[5].y = arrow_y;
v[2].x = arrow_x + r * ::cosf(theta);
v[2].y = arrow_y + r * ::sinf(theta);
v[3].x = v[2].x;
v[3].y = v[2].y;
r = ::fmin(r, 2.5f);
v[1].x = arrow_x + r * ::cosf(theta - CV_PI_F / 2.0f);
v[1].y = arrow_y + r * ::sinf(theta - CV_PI_F / 2.0f);
v[4].x = arrow_x + r * ::cosf(theta + CV_PI_F / 2.0f);
v[4].y = arrow_y + r * ::sinf(theta + CV_PI_F / 2.0f);
int indx = (y * u_avg.cols + x) * NUM_VERTS_PER_ARROW * 3;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[0].x * xscale;
vertex_data[indx++] = v[0].y * yscale;
vertex_data[indx++] = v[0].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[1].x * xscale;
vertex_data[indx++] = v[1].y * yscale;
vertex_data[indx++] = v[1].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[2].x * xscale;
vertex_data[indx++] = v[2].y * yscale;
vertex_data[indx++] = v[2].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[3].x * xscale;
vertex_data[indx++] = v[3].y * yscale;
vertex_data[indx++] = v[3].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[4].x * xscale;
vertex_data[indx++] = v[4].y * yscale;
vertex_data[indx++] = v[4].z;
color_data[indx] = (theta - CV_PI_F) / CV_PI_F * 180.0f;
vertex_data[indx++] = v[5].x * xscale;
vertex_data[indx++] = v[5].y * yscale;
vertex_data[indx++] = v[5].z;
}
}
void CreateOpticalFlowNeedleMap_gpu(PtrStepSzf u_avg, PtrStepSzf v_avg, float* vertex_buffer, float* color_data, float max_flow, float xscale, float yscale)
{
const dim3 block(16);
const dim3 grid(divUp(u_avg.cols, block.x), divUp(u_avg.rows, block.y));
NeedleMapVertexKernel<<<grid, block>>>(u_avg, v_avg, vertex_buffer, color_data, max_flow, xscale, yscale);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
}}}
#endif /* CUDA_DISABLER */
|
93c9baf1e35bd09d049ccba6fd049cf72a42ca38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation after all2all in forward propagation
template <typename TypeEmbeddingComp>
__global__ void forward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size,
int gpu_num, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding vector
// gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one sample on each GPU
// Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
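// A minimal host-side sketch of the source-index arithmetic in the kernel above, expressed in
// units of embedding vectors (multiply by embedding_vec_size to get the float offset). It is a
// hypothetical helper, not called by this file. Example: slot_num = 3, gpu_num = 2 and
// batch_size_per_gpu = 2 place slots {0, 2} on gpu 0 and slot {1} on gpu 1, so
// (sample_id = 1, slot_id = 1) reads vector offset_pre 4 + offset_cur 1 + 0 = 5 of the
// all2all output.
inline int forward_reorder_src_vector_sketch(int sample_id, int slot_id, int slot_num,
                                             int gpu_num, int batch_size_per_gpu) {
  const int gpu_id = slot_id % gpu_num;
  int offset_pre = 0;  // vectors produced by gpus that come before gpu_id
  for (int id = 0; id < gpu_id; id++) {
    const int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
    offset_pre += batch_size_per_gpu * slot_num_per_gpu;
  }
  const int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
  const int offset_cur = sample_id * slot_num_per_gpu;  // earlier samples on gpu_id
  return offset_cur + offset_pre + slot_id / gpu_num;
}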
// reorder operation after all2all in forward propagation
__global__ void forward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding vector
// gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one sample on each GPU
// Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( forward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
hipLaunchKernelGGL(( forward_reorder_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( forward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
/**
* reorder the sequence of data after all2all operation in forward propagation
* @param batch_size_per_gpu batch size per GPU
* @param slot_num the number of localized slots
* @param embedding_vec_size embedding vector size.
* @param src_tensors the source tensors before reorder
* @param dst_tensors the destination tensors after reorder
* @param device_resources all gpus device resources.
* @param context gpu device context, for switching device.
*/
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
forward_reorder<TypeEmbeddingComp>(batch_size_per_gpu, slot_num, embedding_vec_size,
total_gpu_count, src_tensors, dst_tensors, resource_manager);
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_forward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR
| 93c9baf1e35bd09d049ccba6fd049cf72a42ca38.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation after all2all in forward propagation
template <typename TypeEmbeddingComp>
__global__ void forward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size,
int gpu_num, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding vector
// gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one sample on each GPU
// Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
// reorder operation after all2all in forward propagation
__global__ void forward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding vector
// gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one sample on each GPU
// Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
forward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
forward_reorder_align2_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
forward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
/**
* reorder the sequence of data after all2all operation in forward propagation
* @param batch_size_per_gpu batch size per GPU
* @param slot_num the number of localized slots
* @param embedding_vec_size embedding vector size.
* @param src_tensors the source tensors before reorder
* @param dst_tensors the destination tensors after reorder
* @param device_resources all gpus device resources.
* @param context gpu device context, for switching device.
*/
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
forward_reorder<TypeEmbeddingComp>(batch_size_per_gpu, slot_num, embedding_vec_size,
total_gpu_count, src_tensors, dst_tensors, resource_manager);
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_forward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR
|
66d59e28ce6cbd6b3aa2fefba0c686630cb9b0e7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "bboxUtils.h"
pluginStatus_t detectionInference(
hipStream_t stream,
const int N,
const int C1,
const int C2,
const bool shareLocation,
const bool varianceEncodedInTarget,
const int backgroundLabelId,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const float confidenceThreshold,
const float nmsThreshold,
const CodeTypeSSD codeType,
const DataType DT_BBOX,
const void* locData,
const void* priorData,
const DataType DT_SCORE,
const void* confData,
void* keepCount,
void* topDetections,
void* workspace,
bool isNormalized,
bool confSigmoid,
int scoreBits,
const bool isBatchAgnostic)
{
// Batch size * number bbox per sample * 4 = total number of bounding boxes * 4
const int locCount = N * C1;
// Do not clip the bounding box that goes outside the image
const bool clipBBox = false;
/*
* shareLocation
* Bounding boxes are shared among all classes, i.e., a bounding box could be classified as any candidate class.
* Otherwise
* Bounding boxes are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification).
*/
const int numLocClasses = shareLocation ? 1 : numClasses;
size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DT_BBOX);
void* bboxDataRaw = workspace;
pluginStatus_t status = decodeBBoxes(stream,
locCount,
codeType,
varianceEncodedInTarget,
numPredsPerClass,
shareLocation,
numLocClasses,
backgroundLabelId,
clipBBox,
DT_BBOX,
locData,
priorData,
bboxDataRaw,
isBatchAgnostic);
ASSERT_FAILURE(status == STATUS_SUCCESS);
/*
* bboxDataRaw format:
* [batch size, numPriors (per sample), numLocClasses, 4]
*/
// float for now
void* bboxData;
size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DT_BBOX);
void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize);
/*
* After permutation, bboxData format:
* [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4]
* This is equivalent to swapping axis
*/
if (!shareLocation)
{
status = permuteData(stream,
locCount,
numLocClasses,
numPredsPerClass,
4,
DT_BBOX,
false,
bboxDataRaw,
bboxPermute);
ASSERT_FAILURE(status == STATUS_SUCCESS);
bboxData = bboxPermute;
}
/*
* If shareLocation, numLocClasses = 1
* No need to permute data on linear memory
*/
else
{
bboxData = bboxDataRaw;
}
/*
* Conf data format
* [batch size, numPriors * param.numClasses, 1, 1]
*/
const int numScores = N * C2;
size_t scoresSize = detectionForwardPreNMSSize(N, C2);
if (DT_SCORE == DataType::kHALF) scoresSize /= 2;
void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize);
// need a conf_scores
/*
* After permutation, confData format:
* [batch_size, numClasses, numPredsPerClass, 1]
*/
status = permuteData(stream,
numScores,
numClasses,
numPredsPerClass,
1,
DT_SCORE,
confSigmoid,
confData,
scores);
ASSERT_FAILURE(status == STATUS_SUCCESS);
size_t indicesSize = detectionForwardPreNMSSize(N, C2);
void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize);
size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK);
if (DT_SCORE == DataType::kHALF) postNMSScoresSize /= 2;
size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK);
void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize);
void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize);
//size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32);
void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize);
// Sort the scores so that the following NMS could be applied.
float scoreShift = 0.f;
if(DT_SCORE == DataType::kHALF && scoreBits > 0 && scoreBits <= 10)
scoreShift = 1.f;
status = sortScoresPerClass(stream,
N,
numClasses,
numPredsPerClass,
backgroundLabelId,
confidenceThreshold,
DT_SCORE,
scores,
indices,
sortingWorkspace,
scoreBits,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// NMS
status = allClassNMS(stream,
N,
numClasses,
numPredsPerClass,
topK,
nmsThreshold,
shareLocation,
isNormalized,
DT_SCORE,
DT_BBOX,
bboxData,
scores,
indices,
postNMSScores,
postNMSIndices,
false,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Sort the bounding boxes after NMS using scores
status = sortScoresPerImage(stream,
N,
numClasses * topK,
DT_SCORE,
postNMSScores,
postNMSIndices,
scores,
indices,
sortingWorkspace,
scoreBits);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Gather data from the sorted bounding boxes after NMS
status = gatherTopDetections(stream,
shareLocation,
N,
numPredsPerClass,
numClasses,
topK,
keepTopK,
DT_BBOX,
DT_SCORE,
indices,
scores,
bboxData,
keepCount,
topDetections,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
return STATUS_SUCCESS;
}
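// A minimal sketch of the per-sample element counts implied by the comments above: with
// shareLocation the box predictions collapse to one class-agnostic set, otherwise every class
// carries its own boxes (hypothetical helpers, not used by detectionInference; C1 and C2 are
// assumed to match these counts, as the layout comments above describe).
inline int locElementsPerSampleSketch(int numPredsPerClass, int numClasses, bool shareLocation)
{
    const int numLocClasses = shareLocation ? 1 : numClasses;
    return numPredsPerClass * numLocClasses * 4; // 4 coordinates per box
}
inline int confElementsPerSampleSketch(int numPredsPerClass, int numClasses)
{
    return numPredsPerClass * numClasses; // one score per (prior, class)
}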
namespace nvinfer1
{
namespace plugin
{
pluginStatus_t detectionInference(
hipStream_t stream,
const int N,
const int C1,
const int C2,
const bool shareLocation,
const bool varianceEncodedInTarget,
const int backgroundLabelId,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const float confidenceThreshold,
const float nmsThreshold,
const CodeTypeSSD codeType,
const DataType DT_BBOX,
const void* locData,
const void* priorData,
const DataType DT_SCORE,
const void* confData,
void* keepCount,
void* topDetections,
void* workspace,
bool isNormalized,
bool confSigmoid,
int scoreBits,
const bool isBatchAgnostic)
{
// Batch size * number bbox per sample * 4 = total number of bounding boxes * 4
const int locCount = N * C1;
// Do not clip the bounding box that goes outside the image
const bool clipBBox = false;
/*
* shareLocation
* Bounding boxes are shared among all classes, i.e., a bounding box could be classified as any candidate class.
* Otherwise
* Bounding boxes are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification).
*/
const int numLocClasses = shareLocation ? 1 : numClasses;
size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DT_BBOX);
void* bboxDataRaw = workspace;
pluginStatus_t status = decodeBBoxes(stream,
locCount,
codeType,
varianceEncodedInTarget,
numPredsPerClass,
shareLocation,
numLocClasses,
backgroundLabelId,
clipBBox,
DT_BBOX,
locData,
priorData,
bboxDataRaw,
isBatchAgnostic);
ASSERT_FAILURE(status == STATUS_SUCCESS);
/*
* bboxDataRaw format:
* [batch size, numPriors (per sample), numLocClasses, 4]
*/
// float for now
void* bboxData;
size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DT_BBOX);
void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize);
/*
* After permutation, bboxData format:
* [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4]
* This is equivalent to swapping axis
*/
if (!shareLocation)
{
status = permuteData(stream,
locCount,
numLocClasses,
numPredsPerClass,
4,
DT_BBOX,
false,
bboxDataRaw,
bboxPermute);
ASSERT_FAILURE(status == STATUS_SUCCESS);
bboxData = bboxPermute;
}
/*
* If shareLocation, numLocClasses = 1
* No need to permute data on linear memory
*/
else
{
bboxData = bboxDataRaw;
}
/*
* Conf data format
* [batch size, numPriors * param.numClasses, 1, 1]
*/
const int numScores = N * C2;
size_t scoresSize = detectionForwardPreNMSSize(N, C2);
if (DT_SCORE == DataType::kHALF) scoresSize /= 2;
void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize);
// need a conf_scores
/*
* After permutation, confData format:
* [batch_size, numClasses, numPredsPerClass, 1]
*/
status = permuteData(stream,
numScores,
numClasses,
numPredsPerClass,
1,
DT_SCORE,
confSigmoid,
confData,
scores);
ASSERT_FAILURE(status == STATUS_SUCCESS);
size_t indicesSize = detectionForwardPreNMSSize(N, C2);
void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize);
size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK);
if (DT_SCORE == DataType::kHALF) postNMSScoresSize /= 2;
size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK);
void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize);
void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize);
//size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32);
void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize);
// Sort the scores so that the following NMS could be applied.
float scoreShift = 0.f;
if(DT_SCORE == DataType::kHALF && scoreBits > 0 && scoreBits <= 10)
scoreShift = 1.f;
status = sortScoresPerClass(stream,
N,
numClasses,
numPredsPerClass,
backgroundLabelId,
confidenceThreshold,
DT_SCORE,
scores,
indices,
sortingWorkspace,
scoreBits,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// NMS
status = allClassNMS(stream,
N,
numClasses,
numPredsPerClass,
topK,
nmsThreshold,
shareLocation,
isNormalized,
DT_SCORE,
DT_BBOX,
bboxData,
scores,
indices,
postNMSScores,
postNMSIndices,
false,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Sort the bounding boxes after NMS using scores
status = sortScoresPerImage(stream,
N,
numClasses * topK,
DT_SCORE,
postNMSScores,
postNMSIndices,
scores,
indices,
sortingWorkspace,
scoreBits);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Gather data from the sorted bounding boxes after NMS
status = gatherTopDetections(stream,
shareLocation,
N,
numPredsPerClass,
numClasses,
topK,
keepTopK,
DT_BBOX,
DT_SCORE,
indices,
scores,
bboxData,
keepCount,
topDetections,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
return STATUS_SUCCESS;
}
} // namespace plugin
} // namespace nvinfer1
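/*
 * Workspace layout sketch (reference only; this is not part of the plugin API and the
 * arithmetic below is an assumption -- any alignment or padding applied inside
 * nextWorkspacePtr is ignored). detectionInference carves the single user-supplied
 * `workspace` buffer into consecutive regions, in this order:
 *   bboxDataRaw     : detectionForwardBBoxDataSize(N, C1, DT_BBOX)
 *   bboxPermute     : detectionForwardBBoxPermuteSize(shareLocation, N, C1, DT_BBOX)
 *   scores          : detectionForwardPreNMSSize(N, C2)                (halved for kHALF scores)
 *   indices         : detectionForwardPreNMSSize(N, C2)
 *   postNMSScores   : detectionForwardPostNMSSize(N, numClasses, topK) (halved for kHALF scores)
 *   postNMSIndices  : detectionForwardPostNMSSize(N, numClasses, topK)
 *   sortingWorkspace: whatever sortScoresPerClass / sortScoresPerImage require
 * A rough lower bound on the required workspace size would therefore be
 *   detectionForwardBBoxDataSize(N, C1, DT_BBOX)
 *   + detectionForwardBBoxPermuteSize(shareLocation, N, C1, DT_BBOX)
 *   + 2 * detectionForwardPreNMSSize(N, C2)
 *   + 2 * detectionForwardPostNMSSize(N, numClasses, topK)
 *   + the sorting workspace size (not computed in this file).
 */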
| 66d59e28ce6cbd6b3aa2fefba0c686630cb9b0e7.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "bboxUtils.h"
pluginStatus_t detectionInference(
cudaStream_t stream,
const int N,
const int C1,
const int C2,
const bool shareLocation,
const bool varianceEncodedInTarget,
const int backgroundLabelId,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const float confidenceThreshold,
const float nmsThreshold,
const CodeTypeSSD codeType,
const DataType DT_BBOX,
const void* locData,
const void* priorData,
const DataType DT_SCORE,
const void* confData,
void* keepCount,
void* topDetections,
void* workspace,
bool isNormalized,
bool confSigmoid,
int scoreBits,
const bool isBatchAgnostic)
{
// Batch size * number bbox per sample * 4 = total number of bounding boxes * 4
const int locCount = N * C1;
// Do not clip the bounding box that goes outside the image
const bool clipBBox = false;
/*
* shareLocation
* Bounding boxes are shared among all classes, i.e., a bounding box could be classified as any candidate class.
* Otherwise
* Bounding boxes are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification).
*/
const int numLocClasses = shareLocation ? 1 : numClasses;
size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DT_BBOX);
void* bboxDataRaw = workspace;
pluginStatus_t status = decodeBBoxes(stream,
locCount,
codeType,
varianceEncodedInTarget,
numPredsPerClass,
shareLocation,
numLocClasses,
backgroundLabelId,
clipBBox,
DT_BBOX,
locData,
priorData,
bboxDataRaw,
isBatchAgnostic);
ASSERT_FAILURE(status == STATUS_SUCCESS);
/*
* bboxDataRaw format:
* [batch size, numPriors (per sample), numLocClasses, 4]
*/
// float for now
void* bboxData;
size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DT_BBOX);
void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize);
/*
* After permutation, bboxData format:
* [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4]
* This is equivalent to swapping axis
*/
if (!shareLocation)
{
status = permuteData(stream,
locCount,
numLocClasses,
numPredsPerClass,
4,
DT_BBOX,
false,
bboxDataRaw,
bboxPermute);
ASSERT_FAILURE(status == STATUS_SUCCESS);
bboxData = bboxPermute;
}
/*
* If shareLocation, numLocClasses = 1
* No need to permute data on linear memory
*/
else
{
bboxData = bboxDataRaw;
}
/*
* Conf data format
* [batch size, numPriors * param.numClasses, 1, 1]
*/
const int numScores = N * C2;
size_t scoresSize = detectionForwardPreNMSSize(N, C2);
if (DT_SCORE == DataType::kHALF) scoresSize /= 2;
void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize);
// need a conf_scores
/*
* After permutation, confData format:
* [batch_size, numClasses, numPredsPerClass, 1]
*/
status = permuteData(stream,
numScores,
numClasses,
numPredsPerClass,
1,
DT_SCORE,
confSigmoid,
confData,
scores);
ASSERT_FAILURE(status == STATUS_SUCCESS);
size_t indicesSize = detectionForwardPreNMSSize(N, C2);
void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize);
size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK);
if (DT_SCORE == DataType::kHALF) postNMSScoresSize /= 2;
size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK);
void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize);
void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize);
//size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32);
void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize);
// Sort the scores so that the following NMS could be applied.
float scoreShift = 0.f;
if(DT_SCORE == DataType::kHALF && scoreBits > 0 && scoreBits <= 10)
scoreShift = 1.f;
status = sortScoresPerClass(stream,
N,
numClasses,
numPredsPerClass,
backgroundLabelId,
confidenceThreshold,
DT_SCORE,
scores,
indices,
sortingWorkspace,
scoreBits,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// NMS
status = allClassNMS(stream,
N,
numClasses,
numPredsPerClass,
topK,
nmsThreshold,
shareLocation,
isNormalized,
DT_SCORE,
DT_BBOX,
bboxData,
scores,
indices,
postNMSScores,
postNMSIndices,
false,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Sort the bounding boxes after NMS using scores
status = sortScoresPerImage(stream,
N,
numClasses * topK,
DT_SCORE,
postNMSScores,
postNMSIndices,
scores,
indices,
sortingWorkspace,
scoreBits);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Gather data from the sorted bounding boxes after NMS
status = gatherTopDetections(stream,
shareLocation,
N,
numPredsPerClass,
numClasses,
topK,
keepTopK,
DT_BBOX,
DT_SCORE,
indices,
scores,
bboxData,
keepCount,
topDetections,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
return STATUS_SUCCESS;
}
namespace nvinfer1
{
namespace plugin
{
pluginStatus_t detectionInference(
cudaStream_t stream,
const int N,
const int C1,
const int C2,
const bool shareLocation,
const bool varianceEncodedInTarget,
const int backgroundLabelId,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const float confidenceThreshold,
const float nmsThreshold,
const CodeTypeSSD codeType,
const DataType DT_BBOX,
const void* locData,
const void* priorData,
const DataType DT_SCORE,
const void* confData,
void* keepCount,
void* topDetections,
void* workspace,
bool isNormalized,
bool confSigmoid,
int scoreBits,
const bool isBatchAgnostic)
{
// Batch size * number bbox per sample * 4 = total number of bounding boxes * 4
const int locCount = N * C1;
// Do not clip the bounding box that goes outside the image
const bool clipBBox = false;
/*
* shareLocation
* Bounding boxes are shared among all classes, i.e., a bounding box could be classified as any candidate class.
* Otherwise
* Bounding boxes are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification).
*/
const int numLocClasses = shareLocation ? 1 : numClasses;
size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DT_BBOX);
void* bboxDataRaw = workspace;
pluginStatus_t status = decodeBBoxes(stream,
locCount,
codeType,
varianceEncodedInTarget,
numPredsPerClass,
shareLocation,
numLocClasses,
backgroundLabelId,
clipBBox,
DT_BBOX,
locData,
priorData,
bboxDataRaw,
isBatchAgnostic);
ASSERT_FAILURE(status == STATUS_SUCCESS);
/*
* bboxDataRaw format:
* [batch size, numPriors (per sample), numLocClasses, 4]
*/
// float for now
void* bboxData;
size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DT_BBOX);
void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize);
/*
* After permutation, bboxData format:
* [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4]
* This is equivalent to swapping axis
*/
if (!shareLocation)
{
status = permuteData(stream,
locCount,
numLocClasses,
numPredsPerClass,
4,
DT_BBOX,
false,
bboxDataRaw,
bboxPermute);
ASSERT_FAILURE(status == STATUS_SUCCESS);
bboxData = bboxPermute;
}
/*
* If shareLocation, numLocClasses = 1
* No need to permute data on linear memory
*/
else
{
bboxData = bboxDataRaw;
}
/*
* Conf data format
* [batch size, numPriors * param.numClasses, 1, 1]
*/
const int numScores = N * C2;
size_t scoresSize = detectionForwardPreNMSSize(N, C2);
if (DT_SCORE == DataType::kHALF) scoresSize /= 2;
void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize);
// need a conf_scores
/*
* After permutation, confData format:
* [batch_size, numClasses, numPredsPerClass, 1]
*/
status = permuteData(stream,
numScores,
numClasses,
numPredsPerClass,
1,
DT_SCORE,
confSigmoid,
confData,
scores);
ASSERT_FAILURE(status == STATUS_SUCCESS);
size_t indicesSize = detectionForwardPreNMSSize(N, C2);
void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize);
size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK);
if (DT_SCORE == DataType::kHALF) postNMSScoresSize /= 2;
size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK);
void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize);
void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize);
//size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32);
void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize);
// Sort the scores so that the following NMS could be applied.
float scoreShift = 0.f;
if(DT_SCORE == DataType::kHALF && scoreBits > 0 && scoreBits <= 10)
scoreShift = 1.f;
status = sortScoresPerClass(stream,
N,
numClasses,
numPredsPerClass,
backgroundLabelId,
confidenceThreshold,
DT_SCORE,
scores,
indices,
sortingWorkspace,
scoreBits,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// NMS
status = allClassNMS(stream,
N,
numClasses,
numPredsPerClass,
topK,
nmsThreshold,
shareLocation,
isNormalized,
DT_SCORE,
DT_BBOX,
bboxData,
scores,
indices,
postNMSScores,
postNMSIndices,
false,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Sort the bounding boxes after NMS using scores
status = sortScoresPerImage(stream,
N,
numClasses * topK,
DT_SCORE,
postNMSScores,
postNMSIndices,
scores,
indices,
sortingWorkspace,
scoreBits);
ASSERT_FAILURE(status == STATUS_SUCCESS);
// Gather data from the sorted bounding boxes after NMS
status = gatherTopDetections(stream,
shareLocation,
N,
numPredsPerClass,
numClasses,
topK,
keepTopK,
DT_BBOX,
DT_SCORE,
indices,
scores,
bboxData,
keepCount,
topDetections,
scoreShift);
ASSERT_FAILURE(status == STATUS_SUCCESS);
return STATUS_SUCCESS;
}
} // namespace plugin
} // namespace nvinfer1
|
a74b29b7a01d5325e79edef2feb3af66921e2d51.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_utils.h"
#include "cuda_c_mmul_shared.h"
#include "bio_utils.h"
#include <string>
#include <hip/hip_runtime.h>
int main() {
/////////////////////
// Encoding Matrix //
/////////////////////
// TODO: Currently, all dimensions of inputs must be a multiple of 32
// Need to add bounds checking to prevent segfaults in the kernel
// Parameters
const int k = 64;
const int NUM_READS = 10;
const int READ_BATCH_SIZE = 2;
const int MAX_READ_LEN = 100;
//unsigned char *data_type;
// Choose device
hipSetDevice(1);
// Training matrix size
const long T_cols = 1000000;
// Example data
std::string r1 = "TNGGCAGCCCGCCCACGTACAGATGTTGGCGGTGAGCGCTGCGCCTTTACCGGCCCGGCCGGGCATGCTGCGGGTGTGGTGGACGGCGGTCCGGC"; //CGCGC";
std::string r2 = "TNGGCAGCCCGCCCACGTACAGATGTTGGCGGTGAGCGCTGCGCCTTTACCGGCCCGGCCGGGCATGCTGCGGGTGTGGTGGACGGCGGTCCGGC"; //CGCGC";
std::string nucs = "ACGT";
replaceAmbigs( r1 );
replaceAmbigs( r2 );
// Generate example unsigned char encodings
// TODO: Replace with some kind of automated calculation or upper bound on read length
int kmer_count = (r1.length() - k + 1) + (r2.length() - k + 1);
std::cout << kmer_count << std::endl;
unsigned char *F1, *F2, *d_F1, *d_F2;
// Pinned memory for streaming; will have to account for dynamic size at some point
HANDLE_ERROR( hipHostMalloc( (void**)&F1, k * kmer_count * sizeof(unsigned char), hipHostMallocDefault ) );
HANDLE_ERROR( hipHostMalloc( (void**)&F2, k * kmer_count * sizeof(unsigned char), hipHostMallocDefault ) );
// Device memory
HANDLE_ERROR( hipMalloc( (void**)&d_F1, k * kmer_count * sizeof(unsigned char) ) );
HANDLE_ERROR( hipMalloc( (void**)&d_F2, k * kmer_count * sizeof(unsigned char) ) );
////////////////////
// Other Matrices //
////////////////////
// Allocate memory on host
unsigned char *T, *R1, *R2, *d_T, *d_R1, *d_R2;
T = (unsigned char*)std::malloc(k * T_cols * sizeof(unsigned char));
HANDLE_ERROR( hipHostMalloc( (void**)&R1, kmer_count * T_cols * sizeof(unsigned char), hipHostMallocDefault ) );
HANDLE_ERROR( hipHostMalloc( (void**)&R2, kmer_count * T_cols * sizeof(unsigned char), hipHostMallocDefault ) );
// Device alloc
HANDLE_ERROR( hipMalloc( &d_T, k * T_cols * sizeof(unsigned char) ) );
HANDLE_ERROR( hipMalloc( &d_R1, kmer_count * T_cols * sizeof(unsigned char) ) );
HANDLE_ERROR( hipMalloc( &d_R2, kmer_count * T_cols * sizeof(unsigned char) ) );
// Fill training matrix randomly
int idx;
for(unsigned long i = 0; i < k * T_cols; ++i) {
idx = std::rand() % 4;
T[i] = reinterpret_cast<unsigned char&>(nucs[idx]);
//T[i] = 1;
}
// Copy over the training matrix, where it will remain
HANDLE_ERROR( hipMemcpy(d_T, T, k * T_cols * sizeof(unsigned char), hipMemcpyHostToDevice) );
// We don't need T on host anymore
std::free(T);
/* Usually the below would be something like this:
* int N_THREADS_PER_BLOCK = 256; (or 16x16, 32x8 for 2D)
* int N_BLOCKS_PER_GRID = (N + N_THREADS_PER_BLOCK - 1) / N_THREADS_PER_BLOCK; for 1D
*/
std::cout << "Input dimensions:" << std::endl;
std::cout << "Number of total elements in feature arrays: " << 2 * k * kmer_count;
std::cout << ", with size " << (double)(2 * k * kmer_count * sizeof(unsigned char)) / 1000000 << " MB" << std::endl;
std::cout << "Number of elements in training array: " << k * T_cols;
std::cout << ", with size " << (double)(k * T_cols * sizeof(unsigned char)) / 1000000 << " MB" << std::endl;
std::cout << "Number of elements in result array: " << kmer_count * T_cols;
std::cout << ", with size " << (double)(kmer_count * T_cols * sizeof(unsigned char)) / 1000000 << " MB" << std::endl;
dim3 dimBlock(BLOCK_SIZE, 4); // Based on comments on StackOverflow for 2D threads
dim3 dimGrid((T_cols + BLOCK_SIZE - 1) / (BLOCK_SIZE * 4), (kmer_count + BLOCK_SIZE - 1) / BLOCK_SIZE);
std::cout << std::endl << "Grid/Block setup:" << std::endl;
std::cout << dimGrid.x << ',' << dimGrid.y << ' ' << dimBlock.x << ',' << dimBlock.y << std::endl;
//////////////////
// CUDA Streams //
//////////////////
// Keep track of training time
uint64_t start, end;
QueryPerformanceCounter(&start);
// Check stream compatibility
hipDeviceProp_t prop;
int device_num;
HANDLE_ERROR( hipGetDevice( &device_num ) );
HANDLE_ERROR( hipGetDeviceProperties( &prop, device_num ) );
if(!prop.deviceOverlap)
std::cout << "Device streaming overlap disabled, therefore no speedup expected" << std::endl;
// Initialize streams and copy event markers
hipStream_t stream0, stream1;
hipEvent_t cp0, cp1;
HANDLE_ERROR( hipStreamCreate( &stream0 ) );
HANDLE_ERROR( hipStreamCreate( &stream1 ) );
// HANDLE_ERROR( hipEventCreateWithFlags( &cp0, hipEventDisableTiming | hipEventBlockingSync ) );
// HANDLE_ERROR( hipEventCreateWithFlags( &cp1, hipEventDisableTiming | hipEventBlockingSync ) );
HANDLE_ERROR( hipEventCreateWithFlags( &cp0, hipEventDisableTiming ) );
HANDLE_ERROR( hipEventCreateWithFlags( &cp1, hipEventDisableTiming ) );
// Set up pointers for zeroing memory using the driver API
hipDeviceptr_t ptr1 = (hipDeviceptr_t) d_R1;
hipDeviceptr_t ptr2 = (hipDeviceptr_t) d_R2;
for(unsigned long r = 0; r < NUM_READS; r += READ_BATCH_SIZE) {
// Fill F1 and F2 with new data
// TODO: r1 and r2 would be replaced with a reference to fqpair and an integer READ_BATCH_SIZE to feed into F1
load_kmer_array(r1, r2, F1, k);
load_kmer_array(r1, r2, F2, k);
// Enqueue the memory streams in breadth-first order such that
// the block scheduler launches kernels optimally
HANDLE_ERROR(hipMemcpyAsync(d_F1, F1, k * kmer_count * sizeof(unsigned char),
hipMemcpyHostToDevice, stream0));
HANDLE_ERROR(hipMemcpyAsync(d_F2, F2, k * kmer_count * sizeof(unsigned char),
hipMemcpyHostToDevice, stream1));
// Insert event markers into the stream to tell host
// when F1 and F2 are safe to overwrite in host memory
// HANDLE_ERROR( hipEventRecord( cp0, stream0 ) );
// HANDLE_ERROR( hipEventRecord( cp1, stream1 ) );
// Enqueue the kernel launches
hipLaunchKernelGGL(( MatHamm) , dim3(dimGrid), dim3(dimBlock), 0, stream0 , d_F1, d_T, d_R1, k, T_cols);
hipLaunchKernelGGL(( MatHamm) , dim3(dimGrid), dim3(dimBlock), 0, stream1 , d_F2, d_T, d_R2, k, T_cols);
// TODO: DtoH copy is expensive, so we shouldn't do this if we can afford it
// Enqueue copy back to host
// HANDLE_ERROR(hipMemcpyAsync(R1, d_R1, kmer_count * T_cols * sizeof(unsigned char),
// hipMemcpyDeviceToHost, stream0));
// HANDLE_ERROR(hipMemcpyAsync(R2, d_R2, kmer_count * T_cols * sizeof(unsigned char),
// hipMemcpyDeviceToHost, stream1));
// Block host from proceeding until copy to GPU is complete
// HANDLE_ERROR( hipEventSynchronize( cp0 ) );
// HANDLE_ERROR( hipEventSynchronize( cp1 ) );
// Clear previous values
hipMemsetD8Async( ptr1, 0, kmer_count * T_cols, stream0);
hipMemsetD8Async( ptr2, 0, kmer_count * T_cols, stream1);
}
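/*
 * Intended overlap across the two streams (illustrative timeline only; it assumes the
 * device reports deviceOverlap above and has a copy engine that can run alongside
 * compute):
 *
 *   stream0: [H2D F1 ][MatHamm R1][memset R1] ...
 *   stream1:          [H2D F2 ][MatHamm R2 ][memset R2] ...
 *
 * Because the copies for both streams are enqueued before either kernel (breadth-first
 * order), the H2D transfer of one stream can proceed while the other stream's kernel
 * is executing.
 */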
// Synchronize to ensure work is complete
HANDLE_ERROR( hipStreamSynchronize( stream0 ) );
HANDLE_ERROR( hipStreamSynchronize( stream1 ) );
QueryPerformanceCounter(&end);
std::cout << "\nGPU pipeline took: \nTotal: " << double(end - start) / 1000000 << " sec" << std::endl;
std::cout << "Per seq pair: " << double(end - start) / 1000000 / NUM_READS / READ_BATCH_SIZE << " sec" << std::endl;
/////////////
// Cleanup //
/////////////
// Free device memory
HANDLE_ERROR( hipFree( d_F1 ) );
HANDLE_ERROR( hipFree( d_F2 ) );
HANDLE_ERROR( hipFree( d_T ) );
HANDLE_ERROR( hipFree( d_R1 ) );
HANDLE_ERROR( hipFree( d_R2 ) );
HANDLE_ERROR( hipStreamDestroy( stream0 ) );
HANDLE_ERROR( hipStreamDestroy( stream1 ) );
HANDLE_ERROR( hipEventDestroy( cp0 ) );
HANDLE_ERROR( hipEventDestroy( cp1 ) );
// Output to test results
// write_matrix(R1, 1, T_cols);
// std::cout << std::endl;
// Produce gold standard on CPU
// MatHammOnHost(F1, T, R1, kmer_count, k, k, T_cols, kmer_count, T_cols);
// write_matrix(R1, 1, T_cols);
// std::free(T);
// Free host memory here if dynamically allocated
HANDLE_ERROR( hipHostFree( F1 ) );
HANDLE_ERROR( hipHostFree( F2 ) );
HANDLE_ERROR( hipHostFree( R1 ) );
HANDLE_ERROR( hipHostFree( R2 ) );
return 0;
}
| a74b29b7a01d5325e79edef2feb3af66921e2d51.cu | #include "gpu_utils.h"
#include "cuda_c_mmul_shared.h"
#include "bio_utils.h"
#include <string>
#include <cuda.h>
int main() {
/////////////////////
// Encoding Matrix //
/////////////////////
// TODO: Currently, all dimensions of inputs must be a multiple of 32
// Need to add bounds checking to prevent segfaults in the kernel
// Parameters
const int k = 64;
const int NUM_READS = 10;
const int READ_BATCH_SIZE = 2;
const int MAX_READ_LEN = 100;
//unsigned char *data_type;
// Choose device
cudaSetDevice(1);
// Training matrix size
const long T_cols = 1000000;
// Example data
std::string r1 = "TNGGCAGCCCGCCCACGTACAGATGTTGGCGGTGAGCGCTGCGCCTTTACCGGCCCGGCCGGGCATGCTGCGGGTGTGGTGGACGGCGGTCCGGC"; //CGCGC";
std::string r2 = "TNGGCAGCCCGCCCACGTACAGATGTTGGCGGTGAGCGCTGCGCCTTTACCGGCCCGGCCGGGCATGCTGCGGGTGTGGTGGACGGCGGTCCGGC"; //CGCGC";
std::string nucs = "ACGT";
replaceAmbigs( r1 );
replaceAmbigs( r2 );
// Generate example unsigned char encodings
// TODO: Replace with some kind of automated calculation or upper bound on read length
int kmer_count = (r1.length() - k + 1) + (r2.length() - k + 1);
std::cout << kmer_count << std::endl;
unsigned char *F1, *F2, *d_F1, *d_F2;
// Pinned memory for streaming; will have to account for dynamic size at some point
HANDLE_ERROR( cudaHostAlloc( (void**)&F1, k * kmer_count * sizeof(unsigned char), cudaHostAllocDefault ) );
HANDLE_ERROR( cudaHostAlloc( (void**)&F2, k * kmer_count * sizeof(unsigned char), cudaHostAllocDefault ) );
// Device memory
HANDLE_ERROR( cudaMalloc( (void**)&d_F1, k * kmer_count * sizeof(unsigned char) ) );
HANDLE_ERROR( cudaMalloc( (void**)&d_F2, k * kmer_count * sizeof(unsigned char) ) );
////////////////////
// Other Matrices //
////////////////////
// Allocate memory on host
unsigned char *T, *R1, *R2, *d_T, *d_R1, *d_R2;
T = (unsigned char*)std::malloc(k * T_cols * sizeof(unsigned char));
HANDLE_ERROR( cudaHostAlloc( (void**)&R1, kmer_count * T_cols * sizeof(unsigned char), cudaHostAllocDefault ) );
HANDLE_ERROR( cudaHostAlloc( (void**)&R2, kmer_count * T_cols * sizeof(unsigned char), cudaHostAllocDefault ) );
// Device alloc
HANDLE_ERROR( cudaMalloc( &d_T, k * T_cols * sizeof(unsigned char) ) );
HANDLE_ERROR( cudaMalloc( &d_R1, kmer_count * T_cols * sizeof(unsigned char) ) );
HANDLE_ERROR( cudaMalloc( &d_R2, kmer_count * T_cols * sizeof(unsigned char) ) );
// Fill training matrix randomly
int idx;
for(unsigned long i = 0; i < k * T_cols; ++i) {
idx = std::rand() % 4;
T[i] = reinterpret_cast<unsigned char&>(nucs[idx]);
//T[i] = 1;
}
// Copy over the training matrix, where it will remain
HANDLE_ERROR( cudaMemcpy(d_T, T, k * T_cols * sizeof(unsigned char), cudaMemcpyHostToDevice) );
// We don't need T on host anymore
std::free(T);
/* Usually the below would be something like this:
* int N_THREADS_PER_BLOCK = 256; (or 16x16, 32x8 for 2D)
* int N_BLOCKS_PER_GRID = (N + N_THREADS_PER_BLOCK - 1) / N_THREADS_PER_BLOCK; for 1D
*/
std::cout << "Input dimensions:" << std::endl;
std::cout << "Number of total elements in feature arrays: " << 2 * k * kmer_count;
std::cout << ", with size " << (double)(2 * k * kmer_count * sizeof(unsigned char)) / 1000000 << " MB" << std::endl;
std::cout << "Number of elements in training array: " << k * T_cols;
std::cout << ", with size " << (double)(k * T_cols * sizeof(unsigned char)) / 1000000 << " MB" << std::endl;
std::cout << "Number of elements in result array: " << kmer_count * T_cols;
std::cout << ", with size " << (double)(kmer_count * T_cols * sizeof(unsigned char)) / 1000000 << " MB" << std::endl;
dim3 dimBlock(BLOCK_SIZE, 4); // Based on comments on StackOverflow for 2D threads
dim3 dimGrid((T_cols + BLOCK_SIZE - 1) / (BLOCK_SIZE * 4), (kmer_count + BLOCK_SIZE - 1) / BLOCK_SIZE);
std::cout << std::endl << "Grid/Block setup:" << std::endl;
std::cout << dimGrid.x << ',' << dimGrid.y << ' ' << dimBlock.x << ',' << dimBlock.y << std::endl;
//////////////////
// CUDA Streams //
//////////////////
// Keep track of training time
uint64_t start, end;
QueryPerformanceCounter(&start);
// Check stream compatibility
cudaDeviceProp prop;
int device_num;
HANDLE_ERROR( cudaGetDevice( &device_num ) );
HANDLE_ERROR( cudaGetDeviceProperties( &prop, device_num ) );
if(!prop.deviceOverlap)
std::cout << "Device streaming overlap disabled, therefore no speedup expected" << std::endl;
// Initialize streams and copy event markers
cudaStream_t stream0, stream1;
cudaEvent_t cp0, cp1;
HANDLE_ERROR( cudaStreamCreate( &stream0 ) );
HANDLE_ERROR( cudaStreamCreate( &stream1 ) );
// HANDLE_ERROR( cudaEventCreateWithFlags( &cp0, cudaEventDisableTiming | cudaEventBlockingSync ) );
// HANDLE_ERROR( cudaEventCreateWithFlags( &cp1, cudaEventDisableTiming | cudaEventBlockingSync ) );
HANDLE_ERROR( cudaEventCreateWithFlags( &cp0, cudaEventDisableTiming ) );
HANDLE_ERROR( cudaEventCreateWithFlags( &cp1, cudaEventDisableTiming ) );
// Set up pointers for zeroing memory using the driver API
CUdeviceptr ptr1 = (CUdeviceptr) d_R1;
CUdeviceptr ptr2 = (CUdeviceptr) d_R2;
for(unsigned long r = 0; r < NUM_READS; r += READ_BATCH_SIZE) {
// Fill F1 and F2 with new data
// TODO: r1 and r2 would be replaced with a reference to fqpair and an integer READ_BATCH_SIZE to feed into F1
load_kmer_array(r1, r2, F1, k);
load_kmer_array(r1, r2, F2, k);
// Enqueue the memory streams in breadth-first order such that
// the block scheduler launches kernels optimally
HANDLE_ERROR(cudaMemcpyAsync(d_F1, F1, k * kmer_count * sizeof(unsigned char),
cudaMemcpyHostToDevice, stream0));
HANDLE_ERROR(cudaMemcpyAsync(d_F2, F2, k * kmer_count * sizeof(unsigned char),
cudaMemcpyHostToDevice, stream1));
// Insert event markers into the stream to tell host
// when F1 and F2 are safe to overwrite in host memory
// HANDLE_ERROR( cudaEventRecord( cp0, stream0 ) );
// HANDLE_ERROR( cudaEventRecord( cp1, stream1 ) );
// Enqueue the kernel launches
MatHamm <<< dimGrid, dimBlock, 0, stream0 >>> (d_F1, d_T, d_R1, k, T_cols);
MatHamm <<< dimGrid, dimBlock, 0, stream1 >>> (d_F2, d_T, d_R2, k, T_cols);
// TODO: DtoH copy is expensive, so we shouldn't do this if we can afford it
// Enqueue copy back to host
// HANDLE_ERROR(cudaMemcpyAsync(R1, d_R1, kmer_count * T_cols * sizeof(unsigned char),
// cudaMemcpyDeviceToHost, stream0));
// HANDLE_ERROR(cudaMemcpyAsync(R2, d_R2, kmer_count * T_cols * sizeof(unsigned char),
// cudaMemcpyDeviceToHost, stream1));
// Block host from proceeding until copy to GPU is complete
// HANDLE_ERROR( cudaEventSynchronize( cp0 ) );
// HANDLE_ERROR( cudaEventSynchronize( cp1 ) );
// Clear previous values
cuMemsetD8Async( ptr1, 0, kmer_count * T_cols, stream0);
cuMemsetD8Async( ptr2, 0, kmer_count * T_cols, stream1);
}
// Synchronize to ensure work is complete
HANDLE_ERROR( cudaStreamSynchronize( stream0 ) );
HANDLE_ERROR( cudaStreamSynchronize( stream1 ) );
QueryPerformanceCounter(&end);
std::cout << "\nGPU pipeline took: \nTotal: " << double(end - start) / 1000000 << " sec" << std::endl;
std::cout << "Per seq pair: " << double(end - start) / 1000000 / NUM_READS / READ_BATCH_SIZE << " sec" << std::endl;
/////////////
// Cleanup //
/////////////
// Free device memory
HANDLE_ERROR( cudaFree( d_F1 ) );
HANDLE_ERROR( cudaFree( d_F2 ) );
HANDLE_ERROR( cudaFree( d_T ) );
HANDLE_ERROR( cudaFree( d_R1 ) );
HANDLE_ERROR( cudaFree( d_R2 ) );
HANDLE_ERROR( cudaStreamDestroy( stream0 ) );
HANDLE_ERROR( cudaStreamDestroy( stream1 ) );
HANDLE_ERROR( cudaEventDestroy( cp0 ) );
HANDLE_ERROR( cudaEventDestroy( cp1 ) );
// Output to test results
// write_matrix(R1, 1, T_cols);
// std::cout << std::endl;
// Produce gold standard on CPU
// MatHammOnHost(F1, T, R1, kmer_count, k, k, T_cols, kmer_count, T_cols);
// write_matrix(R1, 1, T_cols);
// std::free(T);
// Free host memory here if dynamically allocated
HANDLE_ERROR( cudaFreeHost( F1 ) );
HANDLE_ERROR( cudaFreeHost( F2 ) );
HANDLE_ERROR( cudaFreeHost( R1 ) );
HANDLE_ERROR( cudaFreeHost( R2 ) );
return 0;
}
|
f050e845b00669bdddc9678873316cbe2d257a43.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "shared4RNops.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(shared4RNops, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(shared4RNops, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(shared4RNops, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f050e845b00669bdddc9678873316cbe2d257a43.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "shared4RNops.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
shared4RNops<<<gridBlock,threadBlock>>>(A,B,C,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
shared4RNops<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
shared4RNops<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1a000ac868109a05a673a68a21e53169034050d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
} | 1a000ac868109a05a673a68a21e53169034050d5.cu | #include "includes.h"
__global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
} |
ecc5401a0bbf784720868523b6c52e40c5aa385f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_basic.cu
* \brief GPU Implementation of unary functions.
*/
#include "./elemwise_binary_op.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
// erf
NNVM_REGISTER_OP(erf)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>);
NNVM_REGISTER_OP(_backward_erf)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>);
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_reshape)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrained to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
hipMemcpyAsync(out_data.dptr_,
in_data.shape_.data(),
in_data.ndim() * sizeof(int64_t),
hipMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
const index_t size_var = in_data.Size();
hipMemcpyAsync(out_data.dptr_,
&size_var,
1U * sizeof(int64_t),
hipMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// reciprocal
NNVM_REGISTER_OP(reciprocal)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal>);
NNVM_REGISTER_OP(_backward_reciprocal)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::reciprocal_grad> >);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// square
NNVM_REGISTER_OP(square)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square>);
NNVM_REGISTER_OP(_backward_square)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_grad> >);
// sqrt
NNVM_REGISTER_OP(sqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square_root>);
NNVM_REGISTER_OP(_backward_sqrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_root_grad> >);
// rsqrt
NNVM_REGISTER_OP(rsqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_square_root>);
NNVM_REGISTER_OP(_backward_rsqrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_square_root_grad> >);
// cbrt
NNVM_REGISTER_OP(cbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::cube_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::cube_root>);
NNVM_REGISTER_OP(_backward_cbrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::cube_root_grad> >);
// rcbrt
NNVM_REGISTER_OP(rcbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_cube_root>);
NNVM_REGISTER_OP(_backward_rcbrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_cube_root_grad> >);
// exp
NNVM_REGISTER_OP(exp)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::exp>);
// log
NNVM_REGISTER_OP(log)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log>);
// log10
NNVM_REGISTER_OP(log10)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log10>);
// log2
NNVM_REGISTER_OP(log2)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log2>);
NNVM_REGISTER_OP(_backward_log)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log_grad> >);
NNVM_REGISTER_OP(_backward_log10)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log10_grad> >);
NNVM_REGISTER_OP(_backward_log2)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log2_grad> >);
// log1p
NNVM_REGISTER_OP(log1p)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log1p>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::log1p>);
NNVM_REGISTER_OP(_backward_log1p)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log1p_grad> >);
// expm1
NNVM_REGISTER_OP(expm1)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::expm1>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::expm1>);
NNVM_REGISTER_OP(_backward_expm1)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::exp> >);
// gamma
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
| ecc5401a0bbf784720868523b6c52e40c5aa385f.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_basic.cu
* \brief GPU Implementation of unary functions.
*/
#include "./elemwise_binary_op.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
// erf
NNVM_REGISTER_OP(erf)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>);
NNVM_REGISTER_OP(_backward_erf)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>);
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_reshape)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrained to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
cudaMemcpyAsync(out_data.dptr_,
in_data.shape_.data(),
in_data.ndim() * sizeof(int64_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
const index_t size_var = in_data.Size();
cudaMemcpyAsync(out_data.dptr_,
&size_var,
1U * sizeof(int64_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// reciprocal
NNVM_REGISTER_OP(reciprocal)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal>);
NNVM_REGISTER_OP(_backward_reciprocal)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::reciprocal_grad> >);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// square
NNVM_REGISTER_OP(square)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square>);
NNVM_REGISTER_OP(_backward_square)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_grad> >);
// sqrt
NNVM_REGISTER_OP(sqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square_root>);
NNVM_REGISTER_OP(_backward_sqrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_root_grad> >);
// rsqrt
NNVM_REGISTER_OP(rsqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_square_root>);
NNVM_REGISTER_OP(_backward_rsqrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_square_root_grad> >);
// cbrt
NNVM_REGISTER_OP(cbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::cube_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::cube_root>);
NNVM_REGISTER_OP(_backward_cbrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::cube_root_grad> >);
// rcbrt
NNVM_REGISTER_OP(rcbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_cube_root>);
NNVM_REGISTER_OP(_backward_rcbrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_cube_root_grad> >);
// exp
NNVM_REGISTER_OP(exp)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::exp>);
// log
NNVM_REGISTER_OP(log)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log>);
// log10
NNVM_REGISTER_OP(log10)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log10>);
// log2
NNVM_REGISTER_OP(log2)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log2>);
NNVM_REGISTER_OP(_backward_log)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log_grad> >);
NNVM_REGISTER_OP(_backward_log10)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log10_grad> >);
NNVM_REGISTER_OP(_backward_log2)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log2_grad> >);
// log1p
NNVM_REGISTER_OP(log1p)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log1p>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::log1p>);
NNVM_REGISTER_OP(_backward_log1p)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log1p_grad> >);
// expm1
NNVM_REGISTER_OP(expm1)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::expm1>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::expm1>);
NNVM_REGISTER_OP(_backward_expm1)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::exp> >);
// gamma
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
|
f9bc92264e3e5133dce80fc6805f3f4ec06f3a57.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/device_vector.h>
#include <gtest/gtest.h>
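// Typed test fixture for gdf_digitize: fills a random input column and a bin column,
// runs the digitize kernel on the device, and copies the resulting bin indices back to the host.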
template <class ColumnType>
struct DigitizeTest : public GdfTest {
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
std::vector<ColumnType> col_in_data;
std::vector<ColumnType> bins_data;
std::vector<cudf::size_type> out_data;
gdf_col_pointer col_in;
gdf_col_pointer bins;
DigitizeTest(){
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~DigitizeTest(){}
void initialize_data(size_t column_length, size_t column_range,
size_t bins_length, size_t bins_range)
{
initialize_vector(col_in_data, column_length, column_range, false);
col_in = create_gdf_column(col_in_data);
initialize_vector(bins_data, bins_length, bins_range, true);
bins = create_gdf_column(bins_data);
}
gdf_error digitize(bool right) {
rmm::device_vector<cudf::size_type> out_indices_dev(col_in->size);
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
out_data.resize(out_indices_dev.size());
hipMemcpy(out_data.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(cudf::size_type),
hipMemcpyDeviceToHost);
return result;
}
};
typedef ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double> ValidGdfTypes;
TYPED_TEST_CASE(DigitizeTest, ValidGdfTypes);
TYPED_TEST(DigitizeTest, UpperBound)
{
this->initialize_data(1000, 56, 4, 100);
gdf_error result = this->digitize(true);
EXPECT_EQ(result, GDF_SUCCESS);
}
TYPED_TEST(DigitizeTest, LowerBound)
{
this->initialize_data(10000, 60, 10, 100);
gdf_error result = this->digitize(false);
EXPECT_EQ(result, GDF_SUCCESS);
}
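// Digitizes a small hand-written double column against bins {0, 2, 5, 7, 8} and checks
// every returned bin index against the expected values supplied by the caller.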
void digitize_detail(bool right, const std::vector<int32_t>& expected) {
std::vector<double> bins_data{0, 2, 5, 7, 8};
gdf_col_pointer bins = create_gdf_column(bins_data);
std::vector<double> col_in_data{-10, 0, 1, 2, 3, 8, 9};
gdf_col_pointer col_in = create_gdf_column(col_in_data);
rmm::device_vector<cudf::size_type> out_indices_dev(col_in_data.size());
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
std::vector<cudf::size_type> out_indices(out_indices_dev.size());
hipMemcpy(out_indices.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(cudf::size_type),
hipMemcpyDeviceToHost);
EXPECT_EQ(result, GDF_SUCCESS);
const size_t num_rows = col_in_data.size();
for (unsigned int i = 0; i < num_rows; ++i) {
EXPECT_EQ(expected[i], out_indices[i]);
}
}
TYPED_TEST(DigitizeTest, UpperBoundDetail) {
std::vector<int32_t> expected{0, 0, 1, 1, 2, 4, 5};
digitize_detail(true, expected);
}
TYPED_TEST(DigitizeTest, LowerBoundDetail) {
std::vector<int32_t> expected{0, 1, 1, 2, 2, 5, 5};
digitize_detail(false, expected);
}
| f9bc92264e3e5133dce80fc6805f3f4ec06f3a57.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/device_vector.h>
#include <gtest/gtest.h>
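// Typed test fixture for gdf_digitize: fills a random input column and a bin column,
// runs the digitize kernel on the device, and copies the resulting bin indices back to the host.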
template <class ColumnType>
struct DigitizeTest : public GdfTest {
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
std::vector<ColumnType> col_in_data;
std::vector<ColumnType> bins_data;
std::vector<cudf::size_type> out_data;
gdf_col_pointer col_in;
gdf_col_pointer bins;
DigitizeTest(){
// Use constant seed so the pseudo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~DigitizeTest(){}
void initialize_data(size_t column_length, size_t column_range,
size_t bins_length, size_t bins_range)
{
initialize_vector(col_in_data, column_length, column_range, false);
col_in = create_gdf_column(col_in_data);
initialize_vector(bins_data, bins_length, bins_range, true);
bins = create_gdf_column(bins_data);
}
gdf_error digitize(bool right) {
rmm::device_vector<cudf::size_type> out_indices_dev(col_in->size);
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
out_data.resize(out_indices_dev.size());
cudaMemcpy(out_data.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(cudf::size_type),
cudaMemcpyDeviceToHost);
return result;
}
};
typedef ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double> ValidGdfTypes;
TYPED_TEST_CASE(DigitizeTest, ValidGdfTypes);
TYPED_TEST(DigitizeTest, UpperBound)
{
this->initialize_data(1000, 56, 4, 100);
gdf_error result = this->digitize(true);
EXPECT_EQ(result, GDF_SUCCESS);
}
TYPED_TEST(DigitizeTest, LowerBound)
{
this->initialize_data(10000, 60, 10, 100);
gdf_error result = this->digitize(false);
EXPECT_EQ(result, GDF_SUCCESS);
}
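// Digitizes a small hand-written double column against bins {0, 2, 5, 7, 8} and checks
// every returned bin index against the expected values supplied by the caller.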
void digitize_detail(bool right, const std::vector<int32_t>& expected) {
std::vector<double> bins_data{0, 2, 5, 7, 8};
gdf_col_pointer bins = create_gdf_column(bins_data);
std::vector<double> col_in_data{-10, 0, 1, 2, 3, 8, 9};
gdf_col_pointer col_in = create_gdf_column(col_in_data);
rmm::device_vector<cudf::size_type> out_indices_dev(col_in_data.size());
gdf_error result = gdf_digitize(col_in.get(), bins.get(), right, out_indices_dev.data().get());
std::vector<cudf::size_type> out_indices(out_indices_dev.size());
cudaMemcpy(out_indices.data(),
out_indices_dev.data().get(),
out_indices_dev.size() * sizeof(cudf::size_type),
cudaMemcpyDeviceToHost);
EXPECT_EQ(result, GDF_SUCCESS);
const size_t num_rows = col_in_data.size();
for (unsigned int i = 0; i < num_rows; ++i) {
EXPECT_EQ(expected[i], out_indices[i]);
}
}
TYPED_TEST(DigitizeTest, UpperBoundDetail) {
std::vector<int32_t> expected{0, 0, 1, 1, 2, 4, 5};
digitize_detail(true, expected);
}
TYPED_TEST(DigitizeTest, LowerBoundDetail) {
std::vector<int32_t> expected{0, 1, 1, 2, 2, 5, 5};
digitize_detail(false, expected);
}
|
710aface4af6183071ce54ff33b84ff28fba38a6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CuMatrixUnaryOps.cu
*
* Author: reid
*/
#include "CuMatrix.h"
#include "caps.h"
#include "Kernels.h"
template<typename T> CuMatrix<T> CuMatrix<T>::negate() const {
return unaryOp(Functory<T,negateUnaryOp>::pinch());
}
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class UnaryOp> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::unaryOp(UnaryOp<T> op, hipStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::unaryOp(UnaryOpF<T,StateDim> op, hipStream_t stream ) const
#endif
{
CuMatrix<T> res(m, n, true, true);
if(checkDebug(debugUnaryOp)) {
prlocf("in unaryOp(UnaryOp,...)\n");
printShortString("unary op, src");
res.printShortString("unary op, targ");
}
unaryOp(res, op, stream);
return res;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<absUnaryOp>(absUnaryOp<float>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<absUnaryOp>(absUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp<absUnaryOp>(absUnaryOp<uint>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<absUnaryOp>(absUnaryOp<ulong>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<absUnaryOp>(absUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<absUnaryOp>(absUnaryOp<double>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<powUnaryOp>(powUnaryOp<float>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<powUnaryOp>(powUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp<powUnaryOp>(powUnaryOp<uint>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<powUnaryOp>(powUnaryOp<ulong>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<powUnaryOp>(powUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<powUnaryOp>(powUnaryOp<double>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<expUnaryOp>(expUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<expUnaryOp>(expUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<translationUnaryOp>(translationUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<translationUnaryOp>(translationUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<translationUnaryOp>(translationUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<scaleUnaryOp>(scaleUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<scaleUnaryOp>(scaleUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<scaleUnaryOp>(scaleUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<subFromUnaryOp>(subFromUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<subFromUnaryOp>(subFromUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<subFromUnaryOp>(subFromUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<negateUnaryOp>(negateUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<negateUnaryOp>(negateUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sigmoidUnaryOp>(sigmoidUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sigmoidUnaryOp>(sigmoidUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sigmoidGradientUnaryOp>(sigmoidGradientUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sigmoidGradientUnaryOp>(sigmoidGradientUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<logUnaryOp>(logUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<logUnaryOp>(logUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<oneOverUnaryOp>(oneOverUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<oneOverUnaryOp>(oneOverUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sqrtUnaryOp>(sqrtUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sqrtUnaryOp>(sqrtUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sqrUnaryOp>(sqrUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sqrUnaryOp>(sqrUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<divSqrtUnaryOp>(divSqrtUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<divSqrtUnaryOp>(divSqrtUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<ltUnaryOp>(ltUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<ltUnaryOp>(ltUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<ltUnaryOp>(ltUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<lteUnaryOp>(lteUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<lteUnaryOp>(lteUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<lteUnaryOp>(lteUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<gtUnaryOp>(gtUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<gtUnaryOp>(gtUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<gtUnaryOp>(gtUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<gteUnaryOp>(gteUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<gteUnaryOp>(gteUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<gteUnaryOp>(gteUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<eqUnaryOp>(eqUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<eqUnaryOp>(eqUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<eqUnaryOp>(eqUnaryOp<ulong>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<subFromUnaryOp>(subFromUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<subFromUnaryOp>(subFromUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<ltUnaryOp>(ltUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<lteUnaryOp>(lteUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<gtUnaryOp>(gtUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<gteUnaryOp>(gteUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<eqUnaryOp>(eqUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<translationUnaryOp>(translationUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<scaleUnaryOp>(scaleUnaryOp<int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<ltUnaryOp>(ltUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<lteUnaryOp>(lteUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<gtUnaryOp>(gtUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<gteUnaryOp>(gteUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<eqUnaryOp>(eqUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<translationUnaryOp>(translationUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<scaleUnaryOp>(scaleUnaryOp<unsigned int>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<scaleUnaryOp>(scaleUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<gteUnaryOp>(gteUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<translationUnaryOp>(translationUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<lteUnaryOp>(lteUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<ltUnaryOp>(ltUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<eqUnaryOp>(eqUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<gtUnaryOp>(gtUnaryOp<long>, ihipStream_t*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<subFromUnaryOp>(subFromUnaryOp<long>, ihipStream_t*) const;
#else
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp(UnaryOpF<float,0>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp(UnaryOpF<double,0>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp(UnaryOpF<long,0>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp(UnaryOpF<ulong,0>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp(UnaryOpF<int,0>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp(UnaryOpF<uint,0>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp(UnaryOpF<float,1>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp(UnaryOpF<double,1>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp(UnaryOpF<long,1>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp(UnaryOpF<ulong,1>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp(UnaryOpF<int,1>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp(UnaryOpF<uint,1>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp(UnaryOpF<float,2>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp(UnaryOpF<double,2>, hipStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp(UnaryOpF<ulong,2>, hipStream_t) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class UnaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::unaryOp(CuMatrix<T>& res, UnaryOp<T> op, hipStream_t stream) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::unaryOp(CuMatrix<T>& res, UnaryOpF<T,StateDim> op, hipStream_t stream) const
#endif
{
/*
if(checkDebug(debugUnaryOp)) {
flprintf("unaryOp tileCount %d lastMod %s\n", tiler.getTileCount(), b_util::modStr(lastMod));
}
*/
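// Tile-wise apply: for each row tile, stage a device tile of the source and a matching
// result tile, run the element-wise functor (contiguous or pitched path), then sync the
// result tile back through the tiler.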
int tileM, tileN, tileP, roff, coff;
tiler.tileDims(tileM, tileN, tileP, tdRows);
int tileCount = DIV_UP(m,tileM);
DMatrix<T> d_A, d_Res;
int lastGpu = ExecCaps::currDev();
hipStream_t streams[] = {stream};
for(int i = 0; i < tileCount; i++) {
if(checkDebug(debugFill))flprintf("tileM %d tileN %d tile %d lastGpu %u\n", tileM, tileN, i, lastGpu);
if(checkDebug(debugFill))flprintf("roff %u coff %u\n",roff, coff);
tiler.tileLike(d_A, roff, coff, tileM, tileN, tileP, i, tdRows, lastMod == mod_host, lastGpu, streams);
if(checkDebug(debugFill))flprintf("after tiler.tileLike for tile %d; roff %u coff %u\n", i, roff, coff);
lastGpu = res.tiler.tileLike(d_Res, roff, coff, tileM, tileN, res.tiler.m_p, i, tdRows, false,lastGpu, streams);
if(checkDebug(debugFill))flprintf("after res.tiler.tileLike for tile %d; roff %u coff %u lastGpu %d\n", i, roff, coff, lastGpu);
if(p == n) {
unaryOpL( d_Res, d_A, op,stream);
} else {
if(checkDebug(debugUnaryOp)) {
printf("invoking DMatrix version of unaryOp\n");
}
unaryOpDmL(d_Res, d_A, op, DefaultWidth2Height , stream);
}
res.tiler.syncTile(d_Res, roff, coff, stream);
}
if(checkDebug(debugUnaryOp)) {
printDevArray(d_Res.elements,"d_Res",-1, MIN(10, m*n));
printColoArray(res.elements,MIN(10, m*n));
}
//res.invalidateHost();
res.lastMod = (tileCount>1) ? mod_host : mod_synced;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::unaryOp<approxInvSqrtUnaryOp>(CuMatrix<float>&, approxInvSqrtUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::unaryOp<approxInvSqrtUnaryOp>(CuMatrix<double>&, approxInvSqrtUnaryOp<double>, hipStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::unaryOp<slowInvSqrtUnaryOp>(CuMatrix<float>&, slowInvSqrtUnaryOp<float>, hipStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::unaryOp<slowInvSqrtUnaryOp>(CuMatrix<double>&, slowInvSqrtUnaryOp<double>, hipStream_t) const;
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::unaryOp(CuMatrix<float>&, UnaryOpF<float,0>, hipStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::unaryOp(CuMatrix<double>&, UnaryOpF<double,0>, hipStream_t) const;
#endif
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sigmoid() const {
return unaryOp(Functory<T,sigmoidUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sigmoidGradient() const {
return unaryOp(Functory<T,sigmoidGradientUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::log() const {
return unaryOp(Functory<T,logUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::ceil() const {
return unaryOp(Functory<T,ceilUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::floor() const {
return unaryOp(Functory<T,floorUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::oneOver() const {
return unaryOp(Functory<T,oneOverUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE void CuMatrix<T>::setAll(int val) {
assert(tiler.tileSize == tiler.m_size);
#ifndef __CUDA_ARCH__
checkCudaErrors(hipMemset( tiler.currBuffer(), val, size));
#else
memset(tiler.currBuffer(), val, size);
#endif
lastMod = mod_device;
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::exp() const {
return unaryOp(Functory<T,expUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sqrt() const {
return unaryOp(Functory<T,sqrtUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sqr() const {
return unaryOp(Functory<T,sqrUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::pow(T o) const {
powUnaryOp<T> pf = Functory<T,powUnaryOp>::pinch(o);
return unaryOp(pf);
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::qpow(T o) const {
qpowUnaryOp<T> pf = Functory<T,qpowUnaryOp>::pinch(o);
return unaryOp(pf);
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::divSqrt(T divisor) const {
divSqrtUnaryOp<T> dsf = Functory<T,divSqrtUnaryOp>::pinch(divisor);
return unaryOp(dsf);
}
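// Predicate reductions below: map the element-wise predicate over the matrix, then fold
// the boolean results with AND (all), OR (any) or plus (count); none() is just !any().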
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE bool CuMatrix<T>::all(BoolUnaryOp<T> op) const
{
return gloloReduce(op, andBinaryOp<T>(), true);
}
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE bool CuMatrix<T>::all(UnaryOpF<T,StateDim> op) const
{
return gloloReduce(op, Functory<T, andBinaryOp>::pinch(), true);
}
#endif
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<ltUnaryOp>(ltUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<int>::all<almostEqUnaryOp>(almostEqUnaryOp<int>) const;
template __host__ CUDART_DEVICE bool CuMatrix<unsigned int>::all<almostEqUnaryOp>(almostEqUnaryOp<unsigned int>) const;
template __host__ CUDART_DEVICE bool CuMatrix<long>::all<almostEqUnaryOp>(almostEqUnaryOp<long>) const;
#else
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<long>::all<1>(UnaryOpF<long,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<2>(UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<int>::all<2>(UnaryOpF<int,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<uint>::all<2>(UnaryOpF<uint,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<long>::all<2>(UnaryOpF<long,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<2>(UnaryOpF<ulong,2>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE bool CuMatrix<T>::any(BoolUnaryOp<T> op) const
{
return gloloReduce(op, orBinaryOp<T>(), false);
}
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE bool CuMatrix<T>::any(UnaryOpF<T,StateDim> op) const
{
return gloloReduce(op, Functory<T,orBinaryOp>::pinch(), false);
}
#endif
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::any<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::any<ltUnaryOp>(ltUnaryOp<ulong>) const;
#else
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::any<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<2>(UnaryOpF<double,2>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE bool CuMatrix<T>::none( BoolUnaryOp<T> fn) const
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE bool CuMatrix<T>::none( UnaryOpF<T,StateDim> fn) const
#endif
{
return !any(fn);
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<ltUnaryOp>(ltUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<gtUnaryOp>(gtUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<gtUnaryOp>(gtUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<gtUnaryOp>(gtUnaryOp<ulong>) const;
#else
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<2>(UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<2>(UnaryOpF<ulong,2>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE long CuMatrix<T>::count(BoolUnaryOp<T> fn) const
{
return gloloReduce(fn, plusBinaryOp<T>(), 0);
}
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE long CuMatrix<T>::count( UnaryOpF<T,StateDim> fn) const
{
return gloloReduce(fn, Functory<T, plusBinaryOp>::pinch(), 0);
}
#endif
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE long CuMatrix<float>::count<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<notAlmostEqUnaryOp>(notAlmostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<notAlmostEqUnaryOp>(notAlmostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<notAlmostEqUnaryOp>(notAlmostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<neqUnaryOp>(neqUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<neqUnaryOp>(neqUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<neqUnaryOp>(neqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<ltUnaryOp>(ltUnaryOp<ulong>) const;
#else
template __host__ CUDART_DEVICE long CuMatrix<float>::count<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<2>(UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<2>(UnaryOpF<ulong,2>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE
IndexArray CuMatrix<T>::find(BoolUnaryOp<T> fn) const
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE IndexArray CuMatrix<T>::find( UnaryOpF<T,StateDim> fn) const
#endif
{
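// Maps the predicate over the matrix, then gathers the flat indices of every non-zero
// result into an index buffer that is doubled whenever it fills up.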
CuMatrix<T> m = unaryOp(fn);
uint len = m.size/sizeof(T);
int arraySize = 10;
if(checkDebug(debugUnaryOp)) prlocf("creating initial idx array\n");
uint* arry, *temp;
#ifdef __CUDA_ARCH__
cherr(hipMalloc(&arry, arraySize * sizeof(uint)));
#else
checkCudaError(hipHostMalloc(&arry, arraySize * sizeof(uint),0));
#endif
int currIdx = 0;
for(int i =0; i < len; i++ ) {
if(m.get(i)) {
flprintf("adding idx %d\n", i);
arry[currIdx++] = i;
if(currIdx == arraySize) {
arraySize *= 2;
#ifdef __CUDA_ARCH__
cherr(hipMalloc(&temp, arraySize * sizeof(uint)));
cherr(hipMemcpyAsync(temp, arry, currIdx * sizeof(uint), hipMemcpyDeviceToDevice)); // copy all currIdx indices collected so far
cherr(hipDeviceSynchronize());
cherr(hipFree(arry));
arry = temp;
#else
checkCudaError(hipHostMalloc(&temp, arraySize * sizeof(uint),0));
cherr(hipMemcpy(temp, arry, currIdx * sizeof(uint), hipMemcpyHostToHost)); // copy all currIdx indices collected so far
if(checkDebug(debugDestr))flprintf("freeing host arry %p\n", arry);
cherr(hipHostFree(arry));
arry = temp;
#endif
}
} else {
if(checkDebug(debugUnaryOp)) flprintf("skipping idx %d\n", i);
}
}
if(currIdx < arraySize) {
if(checkDebug(debugUnaryOp)) flprintf("shrinking idx array from %d to %d\n", arraySize, currIdx);
}
//arry = (uint*) realloc(arry, arraySize);
#ifdef __CUDA_ARCH__
cherr(hipMalloc(&temp, arraySize * sizeof(uint)));
cherr(hipMemcpyAsync(temp, arry, currIdx * sizeof(uint), hipMemcpyDeviceToDevice)); // keep all currIdx found indices
cherr(hipDeviceSynchronize());
cherr(hipFree(arry));
arry = temp;
#else
checkCudaError(hipHostMalloc(&temp, arraySize * sizeof(uint),0));
cherr(hipMemcpy(temp, arry, currIdx * sizeof(uint), hipMemcpyHostToHost)); // keep all currIdx found indices
if(checkDebug(debugDestr))flprintf("freeing host arry %p\n", arry);
cherr(hipHostFree(arry));
arry = temp;
#endif
return IndexArray(arry, currIdx);
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE IndexArray CuMatrix<float>::find<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE IndexArray CuMatrix<double>::find<ltUnaryOp>(ltUnaryOp<double>) const;
#else
template __host__ CUDART_DEVICE IndexArray CuMatrix<float>::find<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE IndexArray CuMatrix<double>::find<1>(UnaryOpF<double,1>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE
void CuMatrix<T>::findFirstN(IndexArray arry, BoolUnaryOp<T> op) const
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::findFirstN( IndexArray arry, UnaryOpF<T,StateDim> op) const
#endif
{
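// Maps the predicate over the matrix and records the flat indices of the first
// arry.count matching elements in the caller-supplied IndexArray.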
CuMatrix<T> m = unaryOp(op);
m.syncBuffers();
uint len = m.size/sizeof(T);
int currIdx = 0;
for(int i =0; i < len; i++ ) {
if(i == len -1 ){
if(checkDebug(debugUnaryOp)) flprintf("lastIdx %d (+tiler.currBuffer() = %p)\n", i, i + tiler.currBuffer());
}
if(m.elements[i]) {
if(checkDebug(debugUnaryOp)) flprintf("adding idx %d\n", i);
if(currIdx < arry.count) {
arry.indices[currIdx++] = i;
} else {
if(checkDebug(debugUnaryOp)) prlocf("exceeded capacity of index array; stopping\n");
return;
}
} else {
// if(checkDebug(debugUnaryOp)) flprintf("skipping idx %d\n", i);
}
}
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<ltUnaryOp>(IndexArray , ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<ltUnaryOp>(IndexArray, ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<gtUnaryOp>(IndexArray , gtUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<gtUnaryOp>(IndexArray, gtUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<notAlmostEqUnaryOp>(IndexArray , notAlmostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<notAlmostEqUnaryOp>(IndexArray, notAlmostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<neqUnaryOp>(IndexArray , neqUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<neqUnaryOp>(IndexArray, neqUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<unsigned long>::findFirstN<ltUnaryOp>(IndexArray, ltUnaryOp<unsigned long>) const;
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<1>(IndexArray , UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE void CuMatrix<ulong>::findFirstN<1>(IndexArray , UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<1>(IndexArray, UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<2>(IndexArray , UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<2>(IndexArray, UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE void CuMatrix<ulong>::findFirstN<2>(IndexArray, UnaryOpF<ulong,2>) const;
#endif
#include "CuMatrixInster.cu"
| 710aface4af6183071ce54ff33b84ff28fba38a6.cu | /*
* CuMatrixUnaryOps.cu
*
* Author: reid
*/
#include "CuMatrix.h"
#include "caps.h"
#include "Kernels.h"
template<typename T> CuMatrix<T> CuMatrix<T>::negate() const {
return unaryOp(Functory<T,negateUnaryOp>::pinch());
}
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class UnaryOp> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::unaryOp(UnaryOp<T> op, cudaStream_t stream ) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::unaryOp(UnaryOpF<T,StateDim> op, cudaStream_t stream ) const
#endif
{
CuMatrix<T> res(m, n, true, true);
if(checkDebug(debugUnaryOp)) {
prlocf("in unaryOp(UnaryOp,...)\n");
printShortString("unary op, src");
res.printShortString("unary op, targ");
}
unaryOp(res, op, stream);
return res;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<absUnaryOp>(absUnaryOp<float>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<absUnaryOp>(absUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp<absUnaryOp>(absUnaryOp<uint>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<absUnaryOp>(absUnaryOp<ulong>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<absUnaryOp>(absUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<absUnaryOp>(absUnaryOp<double>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<powUnaryOp>(powUnaryOp<float>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<powUnaryOp>(powUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp<powUnaryOp>(powUnaryOp<uint>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<powUnaryOp>(powUnaryOp<ulong>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<powUnaryOp>(powUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<powUnaryOp>(powUnaryOp<double>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<expUnaryOp>(expUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<expUnaryOp>(expUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<translationUnaryOp>(translationUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<translationUnaryOp>(translationUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<translationUnaryOp>(translationUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<scaleUnaryOp>(scaleUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<scaleUnaryOp>(scaleUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<scaleUnaryOp>(scaleUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<subFromUnaryOp>(subFromUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<subFromUnaryOp>(subFromUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<subFromUnaryOp>(subFromUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<negateUnaryOp>(negateUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<negateUnaryOp>(negateUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sigmoidUnaryOp>(sigmoidUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sigmoidUnaryOp>(sigmoidUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sigmoidGradientUnaryOp>(sigmoidGradientUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sigmoidGradientUnaryOp>(sigmoidGradientUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<logUnaryOp>(logUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<logUnaryOp>(logUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<oneOverUnaryOp>(oneOverUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<oneOverUnaryOp>(oneOverUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sqrtUnaryOp>(sqrtUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sqrtUnaryOp>(sqrtUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<sqrUnaryOp>(sqrUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<sqrUnaryOp>(sqrUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<divSqrtUnaryOp>(divSqrtUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<divSqrtUnaryOp>(divSqrtUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<ltUnaryOp>(ltUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<ltUnaryOp>(ltUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<ltUnaryOp>(ltUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<lteUnaryOp>(lteUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<lteUnaryOp>(lteUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<lteUnaryOp>(lteUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<gtUnaryOp>(gtUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<gtUnaryOp>(gtUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<gtUnaryOp>(gtUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<gteUnaryOp>(gteUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<gteUnaryOp>(gteUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<gteUnaryOp>(gteUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp<eqUnaryOp>(eqUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp<eqUnaryOp>(eqUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp<eqUnaryOp>(eqUnaryOp<ulong>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<subFromUnaryOp>(subFromUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<subFromUnaryOp>(subFromUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<ltUnaryOp>(ltUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<lteUnaryOp>(lteUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<gtUnaryOp>(gtUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<gteUnaryOp>(gteUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<eqUnaryOp>(eqUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<translationUnaryOp>(translationUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp<scaleUnaryOp>(scaleUnaryOp<int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<ltUnaryOp>(ltUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<lteUnaryOp>(lteUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<gtUnaryOp>(gtUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<gteUnaryOp>(gteUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<eqUnaryOp>(eqUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<translationUnaryOp>(translationUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<unsigned int> CuMatrix<unsigned int>::unaryOp<scaleUnaryOp>(scaleUnaryOp<unsigned int>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<scaleUnaryOp>(scaleUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<gteUnaryOp>(gteUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<translationUnaryOp>(translationUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<lteUnaryOp>(lteUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<ltUnaryOp>(ltUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<eqUnaryOp>(eqUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<gtUnaryOp>(gtUnaryOp<long>, CUstream_st*) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp<subFromUnaryOp>(subFromUnaryOp<long>, CUstream_st*) const;
#else
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp(UnaryOpF<float,0>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp(UnaryOpF<double,0>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp(UnaryOpF<long,0>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp(UnaryOpF<ulong,0>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp(UnaryOpF<int,0>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp(UnaryOpF<uint,0>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp(UnaryOpF<float,1>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp(UnaryOpF<double,1>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<long> CuMatrix<long>::unaryOp(UnaryOpF<long,1>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp(UnaryOpF<ulong,1>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<int> CuMatrix<int>::unaryOp(UnaryOpF<int,1>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<uint> CuMatrix<uint>::unaryOp(UnaryOpF<uint,1>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<float> CuMatrix<float>::unaryOp(UnaryOpF<float,2>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<double> CuMatrix<double>::unaryOp(UnaryOpF<double,2>, cudaStream_t) const;
template __host__ CUDART_DEVICE CuMatrix<ulong> CuMatrix<ulong>::unaryOp(UnaryOpF<ulong,2>, cudaStream_t) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class UnaryOp> __host__ CUDART_DEVICE void CuMatrix<T>::unaryOp(CuMatrix<T>& res, UnaryOp<T> op, cudaStream_t stream) const
#else
template<typename T> template<int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::unaryOp(CuMatrix<T>& res, UnaryOpF<T,StateDim> op, cudaStream_t stream) const
#endif
{
/*
if(checkDebug(debugUnaryOp)) {
flprintf("unaryOp tileCount %d lastMod %s\n", tiler.getTileCount(), b_util::modStr(lastMod));
}
*/
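// Tile-wise apply: for each row tile, stage a device tile of the source and a matching
// result tile, run the element-wise functor (contiguous or pitched path), then sync the
// result tile back through the tiler.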
int tileM, tileN, tileP, roff, coff;
tiler.tileDims(tileM, tileN, tileP, tdRows);
int tileCount = DIV_UP(m,tileM);
DMatrix<T> d_A, d_Res;
int lastGpu = ExecCaps::currDev();
cudaStream_t streams[] = {stream};
for(int i = 0; i < tileCount; i++) {
if(checkDebug(debugFill))flprintf("tileM %d tileN %d tile %d lastGpu %u\n", tileM, tileN, i, lastGpu);
if(checkDebug(debugFill))flprintf("roff %u coff %u\n",roff, coff);
tiler.tileLike(d_A, roff, coff, tileM, tileN, tileP, i, tdRows, lastMod == mod_host, lastGpu, streams);
if(checkDebug(debugFill))flprintf("after tiler.tileLike for tile %d; roff %u coff %u\n", i, roff, coff);
lastGpu = res.tiler.tileLike(d_Res, roff, coff, tileM, tileN, res.tiler.m_p, i, tdRows, false,lastGpu, streams);
if(checkDebug(debugFill))flprintf("after res.tiler.tileLike for tile %d; roff %u coff %u lastGpu %d\n", i, roff, coff, lastGpu);
if(p == n) {
unaryOpL( d_Res, d_A, op,stream);
} else {
if(checkDebug(debugUnaryOp)) {
printf("invoking DMatrix version of unaryOp\n");
}
unaryOpDmL(d_Res, d_A, op, DefaultWidth2Height , stream);
}
res.tiler.syncTile(d_Res, roff, coff, stream);
}
if(checkDebug(debugUnaryOp)) {
printDevArray(d_Res.elements,"d_Res",-1, MIN(10, m*n));
printColoArray(res.elements,MIN(10, m*n));
}
//res.invalidateHost();
res.lastMod = (tileCount>1) ? mod_host : mod_synced;
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::unaryOp<approxInvSqrtUnaryOp>(CuMatrix<float>&, approxInvSqrtUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::unaryOp<approxInvSqrtUnaryOp>(CuMatrix<double>&, approxInvSqrtUnaryOp<double>, cudaStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::unaryOp<slowInvSqrtUnaryOp>(CuMatrix<float>&, slowInvSqrtUnaryOp<float>, cudaStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::unaryOp<slowInvSqrtUnaryOp>(CuMatrix<double>&, slowInvSqrtUnaryOp<double>, cudaStream_t) const;
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::unaryOp(CuMatrix<float>&, UnaryOpF<float,0>, cudaStream_t) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::unaryOp(CuMatrix<double>&, UnaryOpF<double,0>, cudaStream_t) const;
#endif
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sigmoid() const {
return unaryOp(Functory<T,sigmoidUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sigmoidGradient() const {
return unaryOp(Functory<T,sigmoidGradientUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::log() const {
return unaryOp(Functory<T,logUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::ceil() const {
return unaryOp(Functory<T,ceilUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::floor() const {
return unaryOp(Functory<T,floorUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::oneOver() const {
return unaryOp(Functory<T,oneOverUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE void CuMatrix<T>::setAll(int val) {
assert(tiler.tileSize == tiler.m_size);
#ifndef __CUDA_ARCH__
checkCudaErrors(cudaMemset( tiler.currBuffer(), val, size));
#else
memset(tiler.currBuffer(), val, size);
#endif
lastMod = mod_device;
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::exp() const {
return unaryOp(Functory<T,expUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sqrt() const {
return unaryOp(Functory<T,sqrtUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::sqr() const {
return unaryOp(Functory<T,sqrUnaryOp>::pinch());
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::pow(T o) const {
powUnaryOp<T> pf = Functory<T,powUnaryOp>::pinch(o);
return unaryOp(pf);
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::qpow(T o) const {
qpowUnaryOp<T> pf = Functory<T,qpowUnaryOp>::pinch(o);
return unaryOp(pf);
}
template<typename T> __host__ CUDART_DEVICE CuMatrix<T> CuMatrix<T>::divSqrt(T divisor) const {
divSqrtUnaryOp<T> dsf = Functory<T,divSqrtUnaryOp>::pinch(divisor);
return unaryOp(dsf);
}
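// Predicate reductions below: map the element-wise predicate over the matrix, then fold
// the boolean results with AND (all), OR (any) or plus (count); none() is just !any().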
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE bool CuMatrix<T>::all(BoolUnaryOp<T> op) const
{
return gloloReduce(op, andBinaryOp<T>(), true);
}
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE bool CuMatrix<T>::all(UnaryOpF<T,StateDim> op) const
{
return gloloReduce(op, Functory<T, andBinaryOp>::pinch(), true);
}
#endif
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<ltUnaryOp>(ltUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<int>::all<almostEqUnaryOp>(almostEqUnaryOp<int>) const;
template __host__ CUDART_DEVICE bool CuMatrix<unsigned int>::all<almostEqUnaryOp>(almostEqUnaryOp<unsigned int>) const;
template __host__ CUDART_DEVICE bool CuMatrix<long>::all<almostEqUnaryOp>(almostEqUnaryOp<long>) const;
#else
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<long>::all<1>(UnaryOpF<long,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::all<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::all<2>(UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<int>::all<2>(UnaryOpF<int,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<uint>::all<2>(UnaryOpF<uint,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<long>::all<2>(UnaryOpF<long,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::all<2>(UnaryOpF<ulong,2>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE bool CuMatrix<T>::any(BoolUnaryOp<T> op) const
{
return gloloReduce(op, orBinaryOp<T>(), false);
}
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE bool CuMatrix<T>::any(UnaryOpF<T,StateDim> op) const
{
return gloloReduce(op, Functory<T,orBinaryOp>::pinch(), false);
}
#endif
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::any<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::any<ltUnaryOp>(ltUnaryOp<ulong>) const;
#else
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::any<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::any<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::any<2>(UnaryOpF<double,2>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE bool CuMatrix<T>::none( BoolUnaryOp<T> fn) const
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE bool CuMatrix<T>::none( UnaryOpF<T,StateDim> fn) const
#endif
{
return !any(fn);
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<ltUnaryOp>(ltUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<gtUnaryOp>(gtUnaryOp<float>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<gtUnaryOp>(gtUnaryOp<double>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<gtUnaryOp>(gtUnaryOp<ulong>) const;
#else
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE bool CuMatrix<float>::none<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<double>::none<2>(UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE bool CuMatrix<ulong>::none<2>(UnaryOpF<ulong,2>) const;
#endif
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE long CuMatrix<T>::count(BoolUnaryOp<T> fn) const
{
return gloloReduce(fn, plusBinaryOp<T>(), 0);
}
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE long CuMatrix<T>::count( UnaryOpF<T,StateDim> fn) const
{
return gloloReduce(fn, Functory<T, plusBinaryOp>::pinch(), 0);
}
#endif
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE long CuMatrix<float>::count<almostEqUnaryOp>(almostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<almostEqUnaryOp>(almostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<almostEqUnaryOp>(almostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<notAlmostEqUnaryOp>(notAlmostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<notAlmostEqUnaryOp>(notAlmostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<notAlmostEqUnaryOp>(notAlmostEqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<neqUnaryOp>(neqUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<neqUnaryOp>(neqUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<neqUnaryOp>(neqUnaryOp<ulong>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<ltUnaryOp>(ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<ltUnaryOp>(ltUnaryOp<ulong>) const;
#else
template __host__ CUDART_DEVICE long CuMatrix<float>::count<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<1>(UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<1>(UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE long CuMatrix<float>::count<2>(UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE long CuMatrix<double>::count<2>(UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE long CuMatrix<ulong>::count<2>(UnaryOpF<ulong,2>) const;
#endif
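// find: applies the boolean unary op to every element and collects the indices
// of elements that map to a nonzero result, growing the index buffer by
// doubling (device-side via cudaMalloc, host-side via cudaHostAlloc).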
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE
IndexArray CuMatrix<T>::find(BoolUnaryOp<T> fn) const
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE IndexArray CuMatrix<T>::find( UnaryOpF<T,StateDim> fn) const
#endif
{
CuMatrix<T> m = unaryOp(fn);
uint len = m.size/sizeof(T);
int arraySize = 10;
if(checkDebug(debugUnaryOp)) prlocf("creating initial idx array\n");
uint* arry, *temp;
#ifdef __CUDA_ARCH__
cherr(cudaMalloc(&arry, arraySize * sizeof(uint)));
#else
checkCudaError(cudaHostAlloc(&arry, arraySize * sizeof(uint),0));
#endif
int currIdx = 0;
for(int i =0; i < len; i++ ) {
if(m.get(i)) {
flprintf("adding idx %d\n", i);
arry[currIdx++] = i;
if(currIdx == arraySize) {
arraySize *= 2;
#ifdef __CUDA_ARCH__
cherr(cudaMalloc(&temp, arraySize * sizeof(uint)));
cherr(cudaMemcpyAsync(temp, arry, currIdx * sizeof(uint), cudaMemcpyDeviceToDevice)); // copy all currIdx valid indices
cherr(cudaDeviceSynchronize());
cherr(cudaFree(arry));
arry = temp;
#else
checkCudaError(cudaHostAlloc(&temp, arraySize * sizeof(uint),0));
cherr(cudaMemcpy(temp, arry, currIdx * sizeof(uint), cudaMemcpyHostToHost)); // copy all currIdx valid indices
if(checkDebug(debugDestr))flprintf("freeing host arry %p\n", arry);
cherr(cudaFreeHost(arry));
arry = temp;
#endif
}
} else {
if(checkDebug(debugUnaryOp)) flprintf("skipping idx %d\n", i);
}
}
if(currIdx < arraySize) {
if(checkDebug(debugUnaryOp)) flprintf("shrinking idx array from %d to %d\n", arraySize, currIdx);
}
//arry = (uint*) realloc(arry, arraySize);
#ifdef __CUDA_ARCH__
cherr(cudaMalloc(&temp, arraySize * sizeof(uint)));
cherr(cudaMemcpyAsync(temp, arry, currIdx * sizeof(uint), cudaMemcpyDeviceToDevice)); // copy all currIdx valid indices
cherr(cudaDeviceSynchronize());
cherr(cudaFree(arry));
arry = temp;
#else
checkCudaError(cudaHostAlloc(&temp, arraySize * sizeof(uint),0));
cherr(cudaMemcpy(temp, arry, currIdx * sizeof(uint), cudaMemcpyHostToHost)); // copy all currIdx valid indices
if(checkDebug(debugDestr))flprintf("freeing host arry %p\n", arry);
cherr(cudaFreeHost(arry));
arry = temp;
#endif
return IndexArray(arry, currIdx);
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE IndexArray CuMatrix<float>::find<ltUnaryOp>(ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE IndexArray CuMatrix<double>::find<ltUnaryOp>(ltUnaryOp<double>) const;
#else
template __host__ CUDART_DEVICE IndexArray CuMatrix<float>::find<1>(UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE IndexArray CuMatrix<double>::find<1>(UnaryOpF<double,1>) const;
#endif
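// findFirstN: like find, but writes at most arry.count matching indices into
// the caller-provided IndexArray instead of allocating its own storage.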
#ifdef CuMatrix_Enable_KTS
template<typename T> template<template <typename> class BoolUnaryOp> __host__ CUDART_DEVICE
void CuMatrix<T>::findFirstN(IndexArray arry, BoolUnaryOp<T> op) const
#else
template<typename T> template <int StateDim> __host__ CUDART_DEVICE void CuMatrix<T>::findFirstN( IndexArray arry, UnaryOpF<T,StateDim> op) const
#endif
{
CuMatrix<T> m = unaryOp(op);
m.syncBuffers();
uint len = m.size/sizeof(T);
int currIdx = 0;
for(int i =0; i < len; i++ ) {
if(i == len -1 ){
if(checkDebug(debugUnaryOp)) flprintf("lastIdx %d (+tiler.currBuffer() = %p)\n", i, i + tiler.currBuffer());
}
if(m.elements[i]) {
if(checkDebug(debugUnaryOp)) flprintf("adding idx %d\n", i);
if(currIdx < arry.count) {
arry.indices[currIdx++] = i;
} else {
if(checkDebug(debugUnaryOp)) prlocf("exceeded capacity of index array; stopping\n");
return;
}
} else {
// if(checkDebug(debugUnaryOp)) flprintf("skipping idx %d\n", i);
}
}
}
#ifdef CuMatrix_Enable_KTS
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<ltUnaryOp>(IndexArray , ltUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<ltUnaryOp>(IndexArray, ltUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<gtUnaryOp>(IndexArray , gtUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<gtUnaryOp>(IndexArray, gtUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<notAlmostEqUnaryOp>(IndexArray , notAlmostEqUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<notAlmostEqUnaryOp>(IndexArray, notAlmostEqUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<neqUnaryOp>(IndexArray , neqUnaryOp<float>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<neqUnaryOp>(IndexArray, neqUnaryOp<double>) const;
template __host__ CUDART_DEVICE void CuMatrix<unsigned long>::findFirstN<ltUnaryOp>(IndexArray, ltUnaryOp<unsigned long>) const;
#else
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<1>(IndexArray , UnaryOpF<float,1>) const;
template __host__ CUDART_DEVICE void CuMatrix<ulong>::findFirstN<1>(IndexArray , UnaryOpF<ulong,1>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<1>(IndexArray, UnaryOpF<double,1>) const;
template __host__ CUDART_DEVICE void CuMatrix<float>::findFirstN<2>(IndexArray , UnaryOpF<float,2>) const;
template __host__ CUDART_DEVICE void CuMatrix<double>::findFirstN<2>(IndexArray, UnaryOpF<double,2>) const;
template __host__ CUDART_DEVICE void CuMatrix<ulong>::findFirstN<2>(IndexArray, UnaryOpF<ulong,2>) const;
#endif
#include "CuMatrixInster.cu"
|
1847fdfb1ee43f377dd6dfe706a8d674c4423bcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream> //cout
#include<stdio.h> //printf
#include<string.h> //strlen
#include<string> //string
#include<cstring>
#include<sys/socket.h> //socket
#include<arpa/inet.h> //inet_addr
#include<netdb.h> //hostent
#include<stdlib.h>
#include "allConstant.h"
#include <unistd.h>
#include <thread>
#include <chrono>
#include "safeQueue.h"
#include "quartic.h"
#include <map>
#include <sstream>
#include <iterator>
#include <unordered_set>
#include <vector>
#include <math.h>
#include <future>
#include <complex>
#include <algorithm>
#include "Point.h"
#include "CycleTrial.h"
#include "HyperTrial.h"
#include "generalFunction.h"
#include "userClass.h"
#include "EllipseTrial.h"
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
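// goOver3: re-scores ellipse trials against an updated point set. `area` packs
// m/2 (x, y) pairs; a point counts when it lies inside the circle around
// (c3X, c3Y) and inside the elliptical band whose focal-distance sum falls in
// [2*a2, 2*a]. Reuses the rate3 weight computed earlier (intent inferred from
// the geometry tests below).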
__global__ void goOver3(int n, ellipseTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate3 = data[k].rate3;
data[k].grAr=rate3*total;
data[k].acAr = total;
}
}
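// bestEllipse: first-pass scoring of every ellipse trial against the full TAS
// point set; the coverage rate against the trial's expected area (tArea) is
// cubed into rate3 and kept only when plausible (< 1.1). Grid-stride loop,
// one trial per thread iteration.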
__global__ void bestEllipse(int n, ellipseTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate = total / data[k].tArea;
float rate3 = rate*rate*rate;
if ( rate3<1.1){
data[k].grAr=rate3*total;
data[k].acAr = total;
data[k].rate3 = rate3;
}
}
}
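// goOver2: re-scores hyperbola trials against an updated point set using the
// rate3 weight already stored by bestHyper; the band test uses the difference
// of distances to the two foci.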
__global__ void goOver2(int n, hyperTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate3 = data[k].rate3;
data[k].grAr=rate3*total;
data[k].acAr = total;
}
}
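// bestHyper: first-pass scoring of hyperbola trials, structured like
// bestEllipse but with a hyperbolic band (difference of focal distances)
// instead of an elliptical one.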
__global__ void bestHyper(int n, hyperTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate = total / (data[k].tArea +0.1);
float rate3 = rate*rate*rate;
if ( rate3<1.2){
data[k].grAr=rate3*total;
data[k].acAr = total;
data[k].rate3 = rate3;
}
}
}
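// bestTwoCycle: scores two-circle trials; a point counts when it lies in the
// annulus of radii [r3, r1] around (c1X, c1Y) and inside the disc of radius r2
// around (c2X, c2Y).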
__global__ void bestTwoCycle(int n, twoCycleTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x1=data[k].c1X;
float y1=data[k].c1Y;
float x2=data[k].c2X;
float y2=data[k].c2Y;
float r1=data[k].h1*data[k].d;
float r2=data[k].h2*data[k].d;
float r3=r1-data[k].d;
float rr3=r3*r3;
float rr1=r1*r1;
float rr2=r2*r2;
float total=0.0;
for(int l=0;l<m;){
float i=area[l++];
float j=area[l++];
float di1=x1-i;
float dj1=y1-j;
float di2=x2-i;
float dj2=y2-j;
if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2)
total+=1.0;
}
float rate=total / (data[k].tArea+0.1);
float rate3=rate*rate*rate;
if (rate3<1.1){
data[k].grAr=rate3*total ;
data[k].acAr=total;
data[k].rate3=rate3;
}
}
}
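// goOver1: re-scores the surviving two-circle trials against an updated point
// set, reusing the rate3 weight stored by bestTwoCycle.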
__global__ void goOver1(int n, twoCycleTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x1=data[k].c1X;
float y1=data[k].c1Y;
float x2=data[k].c2X;
float y2=data[k].c2Y;
float r1=data[k].h1*data[k].d;
float r2=data[k].h2*data[k].d;
float r3=r1-data[k].d;
float rr3=r3*r3;
float rr1=r1*r1;
float rr2=r2*r2;
float total=0.0;
for(int l=0;l<m;){
float i=area[l++];
float j=area[l++];
float di1=x1-i;
float dj1=y1-j;
float di2=x2-i;
float dj2=y2-j;
if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2)
total+=1.0;
}
float rate3=data[k].rate3;
data[k].grAr=rate3*total ;
data[k].acAr=total;
}
}
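// Builds the routing message from circle and hyperbola trials only: copies the
// TAS points and trial candidates to the device, scores them with
// bestTwoCycle/bestHyper, then repeatedly picks the best trial (findBestTry,
// which is expected to shrink curUser.TAS) and re-scores with goOver1/goOver2
// until fewer than ~10% of the original TAS points remain.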
string getRoutingMSG_without_ellipse(User & curUser,vector<twoCycleTrial> & cycleTrials,vector<hyperTrial> & hyperTrials)
{
string res = "";
float *TAS,*d_TAS;
int tasSize=curUser.TAS.size();
TAS=(float*)malloc(sizeof(float)*tasSize*2);
hipMalloc((void**)&d_TAS, sizeof(float) *tasSize*2);
int counter=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[counter++]=x;
TAS[counter++]=y;
}
hipMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, hipMemcpyHostToDevice);
//cout<<"I'm OK Here"<<endl;
counter=0;
twoCycleTrial *cTri;
twoCycleTrial *d_cTri;
if(!cycleTrials.empty())
{
cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*cycleTrials.size());
//cout<<"I'm OK after here"<<endl;
for(twoCycleTrial ct: cycleTrials)
{
cTri[counter++]=ct;
}
hipMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *cycleTrials.size());
hipMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *cycleTrials.size(),hipMemcpyHostToDevice);
cout<<"finished copying total trials: "<<cycleTrials.size()<<endl;
hipLaunchKernelGGL(( bestTwoCycle), dim3(2048),dim3(256), 0, 0, cycleTrials.size(),d_cTri,d_TAS,tasSize*2);
//hipFree(d_cTri);
}
//cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl;
hyperTrial *hTri;
hyperTrial *d_hTri;
int counter2=0;
if(!hyperTrials.empty())
{
hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size());
//cout<<"I'm OK after here"<<endl;
for(hyperTrial ht: hyperTrials)
{
hTri[counter2++]=ht;
}
hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size());
hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyHostToDevice);
cout<<"finished copying total trials: "<<hyperTrials.size()<<endl;
cout<<"********TAS size is: "<<tasSize<<"******"<<endl;
hipLaunchKernelGGL(( bestHyper), dim3(2048),dim3(256), 0, 0, hyperTrials.size(),d_hTri,d_TAS,tasSize*2);
//hipDeviceSynchronize();
//hipFree(d_hTri);
}
if(cycleTrials.empty() || hyperTrials.empty())return "No result";
hipDeviceSynchronize();
hipMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *cycleTrials.size(),hipMemcpyDeviceToHost);
hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyDeviceToHost);
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl;
res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS);
sort(cTri, cTri+counter, sortCycleTrial);
sort(hTri, hTri+counter2, sortHyperTrial);
counter = 1000000 < counter ? 1000000 : counter;
counter2 = 5000000 < counter2 ? 5000000 : counter2;
//hipMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *counter);
hipMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *counter, hipMemcpyHostToDevice);
//hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2);
hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, hipMemcpyHostToDevice);
int newSize = 0;
do
{
newSize=curUser.TAS.size();
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl;
int tasInd=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[tasInd++]=x;
TAS[tasInd++]=y;
}
//hipFree(d_TAS);
//hipMalloc((void**)&d_TAS, sizeof(float) *newSize*2);
hipMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( goOver1), dim3(2048),dim3(256), 0, 0, counter, d_cTri, d_TAS, newSize*2);
hipLaunchKernelGGL(( goOver2), dim3(2048),dim3(256), 0, 0, counter2, d_hTri, d_TAS, newSize*2);
hipDeviceSynchronize();
//free(cTri);
//free(hTri);
//cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter);
//hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2);
hipMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *counter,hipMemcpyDeviceToHost);
hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,hipMemcpyDeviceToHost);
res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS);
}while(newSize>0.1*tasSize);
hipFree(d_cTri);
hipFree(d_hTri);
hipFree(d_TAS);
free(TAS);
free(cTri);
free(hTri);
return res;
}
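// Same overall flow as getRoutingMSG_without_ellipse, but pairs ellipse trials
// (bestEllipse/goOver3) with the hyperbola trials and iterates until the TAS
// set drops below ~15% of its original size.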
string getRoutingMSG_with_ellipse(User & curUser,vector<ellipseTrial> ellipseTrials,vector<hyperTrial> & hyperTrials)
{
string res="";
float *TAS,*d_TAS;
int tasSize=curUser.TAS.size();
TAS=(float*)malloc(sizeof(float)*tasSize*2);
hipMalloc((void**)&d_TAS, sizeof(float) *tasSize*2);
int counter=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[counter++]=x;
TAS[counter++]=y;
}
hipMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, hipMemcpyHostToDevice);
counter=0;
ellipseTrial *eTri;
ellipseTrial *d_eTri;
if(!ellipseTrials.empty())
{
eTri=(ellipseTrial*)malloc(sizeof(ellipseTrial)*ellipseTrials.size());
//cout<<"I'm OK after here"<<endl;
for(ellipseTrial et: ellipseTrials)
{
eTri[counter++]=et;
}
hipMalloc((void**)&d_eTri, sizeof(ellipseTrial) * ellipseTrials.size());
hipMemcpy(d_eTri,eTri,sizeof(ellipseTrial) * ellipseTrials.size(),hipMemcpyHostToDevice);
cout<<"finished copying total trials: "<<ellipseTrials.size()<<endl;
hipLaunchKernelGGL(( bestEllipse), dim3(2048),dim3(256), 0, 0, ellipseTrials.size(),d_eTri,d_TAS,tasSize*2);
}
hipDeviceSynchronize();
hipMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *ellipseTrials.size(),hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(d_eTri);
hyperTrial *hTri;
hyperTrial *d_hTri;
int counter2=0;
if(!hyperTrials.empty())
{
hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size());
//cout<<"I'm OK after here"<<endl;
for(hyperTrial ht: hyperTrials)
{
hTri[counter2++]=ht;
}
hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size());
hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyHostToDevice);
cout<<"finished copying total trials: "<<hyperTrials.size()<<endl;
cout<<"********TAS size is: "<<tasSize<<"******"<<endl;
hipLaunchKernelGGL(( bestHyper), dim3(2048),dim3(256), 0, 0, hyperTrials.size(),d_hTri,d_TAS,tasSize*2);
//hipDeviceSynchronize();
//hipFree(d_hTri);
}
if(ellipseTrials.empty() || hyperTrials.empty())return "No result";
hipDeviceSynchronize();
hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(d_hTri);
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl;
res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS);
sort(eTri, eTri+counter, sortEllipseTrial);
sort(hTri, hTri+counter2, sortHyperTrial);
counter = 5000000 < counter ? 5000000 : counter;
counter2 = 5000000 < counter2 ? 5000000 : counter2;
hipMalloc((void**)&d_eTri, sizeof(ellipseTrial) *counter);
hipMemcpy(d_eTri,eTri,sizeof(ellipseTrial) *counter, hipMemcpyHostToDevice);
hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2);
hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, hipMemcpyHostToDevice);
cout<<"copy to device again"<<endl;
int newSize = curUser.TAS.size();
cout<<"new TAS size is: "<<newSize<<"; old TAS size is: "<<tasSize<<endl;
hipDeviceSynchronize();
while(newSize>0.15*tasSize)
{
newSize=curUser.TAS.size();
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl;
int tasInd=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[tasInd++]=x;
TAS[tasInd++]=y;
}
hipFree(d_TAS);
hipMalloc((void**)&d_TAS, sizeof(float) *newSize*2);
hipMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( goOver3), dim3(2048),dim3(256), 0, 0, counter, d_eTri, d_TAS, newSize*2);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipLaunchKernelGGL(( goOver2), dim3(2048),dim3(256), 0, 0, counter2, d_hTri, d_TAS, newSize*2);
hipDeviceSynchronize();
hipMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *counter,hipMemcpyDeviceToHost);
hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//free(cTri);
//free(hTri);
//cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter);
//hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2);
res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS);
}
hipDeviceSynchronize();
hipFree(d_eTri);
hipFree(d_hTri);
hipFree(d_TAS);
free(TAS);
free(eTri);
free(hTri);
//cout<<"sending back result!!!!!!!!!!!!!!!"<<endl;
return res;
}
void writeToFile(string msg, int sn, int id, int mode,string network)
{
string filename = "../results/"+network+"_d/"+ to_string(sn)+"_"+to_string(id)+"_";
if(mode==0)filename+="withoutEllipse";
else filename+="withEllipse";
ofstream resFile;
resFile.open(filename);
resFile<<msg;
resFile.close();
}
void writeTrainToFile(string msg, int sn, int id,string train )
{
string filename = "./dataExpect/"+to_string(sn)+"/preTrain_"+to_string(id);
ofstream resFile;
resFile.open(filename);
resFile<<train;
resFile<<msg;
resFile.close();
cout<<"done save to file"<<endl;
}
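// Training variant: scores only the hyperbola trials for the current TAS set
// and returns the result of findBestTrain; the iterative refinement loop is
// left commented out below.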
string getRoutingMSG_train(User & curUser, vector<hyperTrial> & hyperTrials)
{
string res = "";
float *TAS,*d_TAS;
int tasSize=curUser.TAS.size();
TAS=(float*)malloc(sizeof(float)*tasSize*2);
hipMalloc((void**)&d_TAS, sizeof(float) *tasSize*2);
int counter=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[counter++]=x;
TAS[counter++]=y;
}
hipMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, hipMemcpyHostToDevice);
hyperTrial *hTri;
hyperTrial *d_hTri;
int counter2=0;
if(!hyperTrials.empty())
{
hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size());
//cout<<"I'm OK after here"<<endl;
for(hyperTrial ht: hyperTrials)
{
hTri[counter2++]=ht;
}
hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size());
hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyHostToDevice);
cout<<"finished copying total trials: "<<hyperTrials.size()<<endl;
cout<<"********TAS size is: "<<tasSize<<"******"<<endl;
hipLaunchKernelGGL(( bestHyper), dim3(2048),dim3(256), 0, 0, hyperTrials.size(),d_hTri,d_TAS,tasSize*2);
}
if( hyperTrials.empty())return "No result";
hipDeviceSynchronize();
hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyDeviceToHost);
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl;
res+=findBestTrain(hTri, counter2, curUser.TAS);
/*
sort(hTri, hTri+counter2, sortHyperTrial);
counter2 = 5000000 < counter2 ? 5000000 : counter2;
hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, hipMemcpyHostToDevice);
int newSize = 0;
do
{
newSize=curUser.TAS.size();
if(newSize < 0.1*tasSize){
cout<<"new TAS size is: "<<newSize<<endl;
break;
}
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl;
int tasInd=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[tasInd++]=x;
TAS[tasInd++]=y;
}
hipMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( goOver2), dim3(2048),dim3(256), 0, 0, counter2, d_hTri, d_TAS, newSize*2);
hipDeviceSynchronize();
hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,hipMemcpyDeviceToHost);
res+=findBestTrain(hTri, counter2, curUser.TAS);
}while(newSize>0.1*tasSize);
*/
hipFree(d_hTri);
hipFree(d_TAS);
free(TAS);
free(hTri);
hipDeviceSynchronize();
return res;
}
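// Expected arguments (inferred from the parsing below): COL start end
// anchors_num followed by anchors_num anchor node ids. For each case cs in
// [start, end) the program reads ./train/<cs>/TAS_<i>, scores the hyperbola
// trials on the GPU and writes the result to ./dataExpect/<cs>/preTrain_<i>.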
int main(int argc , char *argv[])
{
int numConfig = 8;
if (argc < numConfig){
printf("wrong command: too few arguments\n");
return 0;
}
int COL=atoi(argv[1]);
int anchors_num = atoi(argv[4]);
int start=atoi(argv[2]);
int end = atoi(argv[3]);
hipSetDevice(1);
int argStart = 5;
vector<int> anchors;
for(; argStart<5+anchors_num;argStart++){
int cur = atoi(argv[argStart]);
float x = (cur-1)%COL;
float y = (cur-1)/COL;
//cout<<"X:"<<x<<" Y:"<<y<<endl;
anchors.push_back(x);
anchors.push_back(y);
}
cout<<"argStart: "<<argStart<<endl;
User curUser;
short hv[anchorSize][anchorSize];
curUser.trainingExample(anchors);
curUser.getTrainInfo(hv,3);
vector<hyperTrial> hyperTrials=curUser.findTrainHyperTrial(hv);
cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl;
for(int cs=start;cs<end;cs++){
for(int fileIndex = 1; fileIndex<100000; fileIndex++){
string fname = "./train/"+to_string(cs)+"/TAS_"+to_string(fileIndex);
string filename = "./dataExpect/"+to_string(cs)+"/preTrain_"+to_string(fileIndex);
if((!fs::exists(filename)) && fs::exists(fname)){
ifstream network;
network.open(fname);
curUser.clearTAS();
int cur;
string values = "";
int TAS_SIZE=0;
while(network>>cur)
{
TAS_SIZE++;
int x = (cur-1)%COL;
int y = (cur-1)/COL;
values += curUser.addToTAS(x,y);
values += "\n";
}
string TAS_STRING = to_string(TAS_SIZE)+"\n";
TAS_STRING+=values;
network.close();
string res = getRoutingMSG_train(curUser,hyperTrials);
cout<<"cs: "<<cs<<"fileIndex: "<<fileIndex<<endl;
writeTrainToFile(res, cs, fileIndex,TAS_STRING);
}
else{
cout<<fname<<" does not exist"<<endl;
}
}
}
printf("\n");
/*
srand(time(NULL));
User curUser;
int mode;
while(1){
cout << "type in the mode: "<<endl;
cout << "1: create random network"<<endl;
cout << "2: find route msg for specific random network"<<endl;
cin >> mode;
if(mode ==1){
int nn, an , rr;
cout<< "Please type in number of nodes"<<endl;
cin >> nn;
cout<< "please type in maximum number of anchor nodes"<<endl;
cin >> an;
cout<< "please type in the radio range"<<endl;
cin >> rr;
string nid = curUser.randomNetwork(nn,an,rr);
cout<< "created random network: "<<nid<<endl;
}
else if(mode == 2){
string network;
cout<< "Please type in the network ID"<<endl;
cin >> network;
curUser.loadNetwork(network);
short hv[nodeSize][anchorSize];
curUser.getHopInfo(hv);
vector<twoCycleTrial> cycleTrials=curUser.findTwoCycleTrial(hv);
cout<<"number of cycleTrials: "<<cycleTrials.size()<<endl;
vector<ellipseTrial> ellipseTrials=curUser.findEllipseTrial(hv);
cout<<"number of ellipseTrials: "<<ellipseTrials.size()<<endl;
vector<hyperTrial> hyperTrials=curUser.findHyperTrial(hv);
cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl;
for(int i=100; i>95; i--)
{
for(int j=0; j<5; j++)
{
curUser.updateStroke(i,j,50);
cout<<"curUser TAS: "<<curUser.TAS.size()<<endl;
string res = getRoutingMSG_without_ellipse(curUser,cycleTrials,hyperTrials);
writeToFile(res,i,j,0,network);
cout<<i<<"_"<<j<<" result without ellipse: "<<res<<endl;
//string res2 = getRoutingMSG_with_ellipse(curUser,ellipseTrials,hyperTrials);
//writeToFile(res2,i,j,1,network);
//cout<<i<<"_"<<j<<" result with ellipse: "<< res2<<endl;
}
}
}
else{
cout<<" please type 1 or 2"<<endl;
}
}
*/
return 0;
}
| 1847fdfb1ee43f377dd6dfe706a8d674c4423bcf.cu | #include<iostream> //cout
#include<stdio.h> //printf
#include<string.h> //strlen
#include<string> //string
#include<cstring>
#include<sys/socket.h> //socket
#include<arpa/inet.h> //inet_addr
#include<netdb.h> //hostent
#include<stdlib.h>
#include "allConstant.h"
#include <unistd.h>
#include <thread>
#include <chrono>
#include "safeQueue.h"
#include "quartic.h"
#include <map>
#include <sstream>
#include <iterator>
#include <unordered_set>
#include <vector>
#include <math.h>
#include <future>
#include <complex>
#include <algorithm>
#include "Point.h"
#include "CycleTrial.h"
#include "HyperTrial.h"
#include "generalFunction.h"
#include "userClass.h"
#include "EllipseTrial.h"
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
__global__ void goOver3(int n, ellipseTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate3 = data[k].rate3;
data[k].grAr=rate3*total;
data[k].acAr = total;
}
}
__global__ void bestEllipse(int n, ellipseTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate = total / data[k].tArea;
float rate3 = rate*rate*rate;
if ( rate3<1.1){
data[k].grAr=rate3*total;
data[k].acAr = total;
data[k].rate3 = rate3;
}
}
}
__global__ void goOver2(int n, hyperTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate3 = data[k].rate3;
data[k].grAr=rate3*total;
data[k].acAr = total;
}
}
__global__ void bestHyper(int n, hyperTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x = data[k].c3X;
float y = data[k].c3Y;
float r = data[k].h3 * data[k].avgD2;
float a = data[k].ah * data[k].avgD1;
float h1x = data[k].c1X;
float h1y = data[k].c1Y;
float h2x = data[k].c2X;
float h2y = data[k].c2Y;
float rr = r*r;
float a2 = (data[k].ah-1.0)*data[k].avgD1;
float total =0.0;
for(int l = 0; l<m;){
float i = area[l++];
float j = area[l++];
float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y));
float di = x-i;
float dj = y-j;
if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0;
}
float rate = total / (data[k].tArea +0.1);
float rate3 = rate*rate*rate;
if ( rate3<1.2){
data[k].grAr=rate3*total;
data[k].acAr = total;
data[k].rate3 = rate3;
}
}
}
__global__ void bestTwoCycle(int n, twoCycleTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x1=data[k].c1X;
float y1=data[k].c1Y;
float x2=data[k].c2X;
float y2=data[k].c2Y;
float r1=data[k].h1*data[k].d;
float r2=data[k].h2*data[k].d;
float r3=r1-data[k].d;
float rr3=r3*r3;
float rr1=r1*r1;
float rr2=r2*r2;
float total=0.0;
for(int l=0;l<m;){
float i=area[l++];
float j=area[l++];
float di1=x1-i;
float dj1=y1-j;
float di2=x2-i;
float dj2=y2-j;
if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2)
total+=1.0;
}
float rate=total / (data[k].tArea+0.1);
float rate3=rate*rate*rate;
if (rate3<1.1){
data[k].grAr=rate3*total ;
data[k].acAr=total;
data[k].rate3=rate3;
}
}
}
__global__ void goOver1(int n, twoCycleTrial *data, float *area,int m){
int index = threadIdx.x+blockIdx.x*blockDim.x;
int stride=blockDim.x*gridDim.x;
for(int k=index;k<n;k+=stride){
float x1=data[k].c1X;
float y1=data[k].c1Y;
float x2=data[k].c2X;
float y2=data[k].c2Y;
float r1=data[k].h1*data[k].d;
float r2=data[k].h2*data[k].d;
float r3=r1-data[k].d;
float rr3=r3*r3;
float rr1=r1*r1;
float rr2=r2*r2;
float total=0.0;
for(int l=0;l<m;){
float i=area[l++];
float j=area[l++];
float di1=x1-i;
float dj1=y1-j;
float di2=x2-i;
float dj2=y2-j;
if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2)
total+=1.0;
}
float rate3=data[k].rate3;
data[k].grAr=rate3*total ;
data[k].acAr=total;
}
}
string getRoutingMSG_without_ellipse(User & curUser,vector<twoCycleTrial> & cycleTrials,vector<hyperTrial> & hyperTrials)
{
string res = "";
float *TAS,*d_TAS;
int tasSize=curUser.TAS.size();
TAS=(float*)malloc(sizeof(float)*tasSize*2);
cudaMalloc((void**)&d_TAS, sizeof(float) *tasSize*2);
int counter=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[counter++]=x;
TAS[counter++]=y;
}
cudaMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, cudaMemcpyHostToDevice);
//cout<<"I'm OK Here"<<endl;
counter=0;
twoCycleTrial *cTri;
twoCycleTrial *d_cTri;
if(!cycleTrials.empty())
{
cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*cycleTrials.size());
//cout<<"I'm OK after here"<<endl;
for(twoCycleTrial ct: cycleTrials)
{
cTri[counter++]=ct;
}
cudaMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *cycleTrials.size());
cudaMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *cycleTrials.size(),cudaMemcpyHostToDevice);
cout<<"finished copying total trials: "<<cycleTrials.size()<<endl;
bestTwoCycle<<<2048,256>>>(cycleTrials.size(),d_cTri,d_TAS,tasSize*2);
//cudaFree(d_cTri);
}
//cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl;
hyperTrial *hTri;
hyperTrial *d_hTri;
int counter2=0;
if(!hyperTrials.empty())
{
hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size());
//cout<<"I'm OK after here"<<endl;
for(hyperTrial ht: hyperTrials)
{
hTri[counter2++]=ht;
}
cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size());
cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyHostToDevice);
cout<<"finished copying total trials: "<<hyperTrials.size()<<endl;
cout<<"********TAS size is: "<<tasSize<<"******"<<endl;
bestHyper<<<2048,256>>>(hyperTrials.size(),d_hTri,d_TAS,tasSize*2);
//cudaDeviceSynchronize();
//cudaFree(d_hTri);
}
if(cycleTrials.empty() || hyperTrials.empty())return "No result";
cudaDeviceSynchronize();
cudaMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *cycleTrials.size(),cudaMemcpyDeviceToHost);
cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyDeviceToHost);
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl;
res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS);
sort(cTri, cTri+counter, sortCycleTrial);
sort(hTri, hTri+counter2, sortHyperTrial);
counter = 1000000 < counter ? 1000000 : counter;
counter2 = 5000000 < counter2 ? 5000000 : counter2;
//cudaMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *counter);
cudaMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *counter, cudaMemcpyHostToDevice);
//cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2);
cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, cudaMemcpyHostToDevice);
int newSize = 0;
do
{
newSize=curUser.TAS.size();
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl;
int tasInd=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[tasInd++]=x;
TAS[tasInd++]=y;
}
//cudaFree(d_TAS);
//cudaMalloc((void**)&d_TAS, sizeof(float) *newSize*2);
cudaMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, cudaMemcpyHostToDevice);
goOver1<<<2048,256>>>(counter, d_cTri, d_TAS, newSize*2);
goOver2<<<2048,256>>>(counter2, d_hTri, d_TAS, newSize*2);
cudaDeviceSynchronize();
//free(cTri);
//free(hTri);
//cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter);
//hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2);
cudaMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *counter,cudaMemcpyDeviceToHost);
cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,cudaMemcpyDeviceToHost);
res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS);
}while(newSize>0.1*tasSize);
cudaFree(d_cTri);
cudaFree(d_hTri);
cudaFree(d_TAS);
free(TAS);
free(cTri);
free(hTri);
return res;
}
string getRoutingMSG_with_ellipse(User & curUser,vector<ellipseTrial> ellipseTrials,vector<hyperTrial> & hyperTrials)
{
string res="";
float *TAS,*d_TAS;
int tasSize=curUser.TAS.size();
TAS=(float*)malloc(sizeof(float)*tasSize*2);
cudaMalloc((void**)&d_TAS, sizeof(float) *tasSize*2);
int counter=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[counter++]=x;
TAS[counter++]=y;
}
cudaMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, cudaMemcpyHostToDevice);
counter=0;
ellipseTrial *eTri;
ellipseTrial *d_eTri;
if(!ellipseTrials.empty())
{
eTri=(ellipseTrial*)malloc(sizeof(ellipseTrial)*ellipseTrials.size());
//cout<<"I'm OK after here"<<endl;
for(ellipseTrial et: ellipseTrials)
{
eTri[counter++]=et;
}
cudaMalloc((void**)&d_eTri, sizeof(ellipseTrial) * ellipseTrials.size());
cudaMemcpy(d_eTri,eTri,sizeof(ellipseTrial) * ellipseTrials.size(),cudaMemcpyHostToDevice);
cout<<"finished copying total trials: "<<ellipseTrials.size()<<endl;
bestEllipse<<<2048,256>>>(ellipseTrials.size(),d_eTri,d_TAS,tasSize*2);
}
cudaDeviceSynchronize();
cudaMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *ellipseTrials.size(),cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(d_eTri);
hyperTrial *hTri;
hyperTrial *d_hTri;
int counter2=0;
if(!hyperTrials.empty())
{
hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size());
//cout<<"I'm OK after here"<<endl;
for(hyperTrial ht: hyperTrials)
{
hTri[counter2++]=ht;
}
cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size());
cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyHostToDevice);
cout<<"finished copying total trials: "<<hyperTrials.size()<<endl;
cout<<"********TAS size is: "<<tasSize<<"******"<<endl;
bestHyper<<<2048,256>>>(hyperTrials.size(),d_hTri,d_TAS,tasSize*2);
//cudaDeviceSynchronize();
//cudaFree(d_hTri);
}
if(ellipseTrials.empty() || hyperTrials.empty())return "No result";
cudaDeviceSynchronize();
cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(d_hTri);
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl;
res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS);
sort(eTri, eTri+counter, sortEllipseTrial);
sort(hTri, hTri+counter2, sortHyperTrial);
counter = 5000000 < counter ? 5000000 : counter;
counter2 = 5000000 < counter2 ? 5000000 : counter2;
cudaMalloc((void**)&d_eTri, sizeof(ellipseTrial) *counter);
cudaMemcpy(d_eTri,eTri,sizeof(ellipseTrial) *counter, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2);
cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, cudaMemcpyHostToDevice);
cout<<"copy to device again"<<endl;
int newSize = curUser.TAS.size();
cout<<"new TAS size is: "<<newSize<<"; old TAS size is: "<<tasSize<<endl;
cudaDeviceSynchronize();
while(newSize>0.15*tasSize)
{
newSize=curUser.TAS.size();
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl;
int tasInd=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[tasInd++]=x;
TAS[tasInd++]=y;
}
cudaFree(d_TAS);
cudaMalloc((void**)&d_TAS, sizeof(float) *newSize*2);
cudaMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
goOver3<<<2048,256>>>(counter, d_eTri, d_TAS, newSize*2);
cudaDeviceSynchronize();
cudaDeviceSynchronize();
goOver2<<<2048,256>>>(counter2, d_hTri, d_TAS, newSize*2);
cudaDeviceSynchronize();
cudaMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *counter,cudaMemcpyDeviceToHost);
cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//free(cTri);
//free(hTri);
//cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter);
//hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2);
res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS);
}
cudaDeviceSynchronize();
cudaFree(d_eTri);
cudaFree(d_hTri);
cudaFree(d_TAS);
free(TAS);
free(eTri);
free(hTri);
//cout<<"sending back result!!!!!!!!!!!!!!!"<<endl;
return res;
}
void writeToFile(string msg, int sn, int id, int mode,string network)
{
string filename = "../results/"+network+"_d/"+ to_string(sn)+"_"+to_string(id)+"_";
if(mode==0)filename+="withoutEllipse";
else filename+="withEllipse";
ofstream resFile;
resFile.open(filename);
resFile<<msg;
resFile.close();
}
void writeTrainToFile(string msg, int sn, int id,string train )
{
string filename = "./dataExpect/"+to_string(sn)+"/preTrain_"+to_string(id);
ofstream resFile;
resFile.open(filename);
resFile<<train;
resFile<<msg;
resFile.close();
cout<<"done save to file"<<endl;
}
string getRoutingMSG_train(User & curUser, vector<hyperTrial> & hyperTrials)
{
string res = "";
float *TAS,*d_TAS;
int tasSize=curUser.TAS.size();
TAS=(float*)malloc(sizeof(float)*tasSize*2);
cudaMalloc((void**)&d_TAS, sizeof(float) *tasSize*2);
int counter=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[counter++]=x;
TAS[counter++]=y;
}
cudaMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, cudaMemcpyHostToDevice);
hyperTrial *hTri;
hyperTrial *d_hTri;
int counter2=0;
if(!hyperTrials.empty())
{
hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size());
//cout<<"I'm OK after here"<<endl;
for(hyperTrial ht: hyperTrials)
{
hTri[counter2++]=ht;
}
cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size());
cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyHostToDevice);
cout<<"finished copying total trials: "<<hyperTrials.size()<<endl;
cout<<"********TAS size is: "<<tasSize<<"******"<<endl;
bestHyper<<<2048,256>>>(hyperTrials.size(),d_hTri,d_TAS,tasSize*2);
}
if( hyperTrials.empty())return "No result";
cudaDeviceSynchronize();
cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyDeviceToHost);
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl;
res+=findBestTrain(hTri, counter2, curUser.TAS);
/*
sort(hTri, hTri+counter2, sortHyperTrial);
counter2 = 5000000 < counter2 ? 5000000 : counter2;
cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, cudaMemcpyHostToDevice);
int newSize = 0;
do
{
newSize=curUser.TAS.size();
if(newSize < 0.1*tasSize){
cout<<"new TAS size is: "<<newSize<<endl;
break;
}
cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl;
int tasInd=0;
for(string t:curUser.TAS)
{
stringstream tt(t);
float x,y;
tt>>x;
tt>>y;
TAS[tasInd++]=x;
TAS[tasInd++]=y;
}
cudaMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, cudaMemcpyHostToDevice);
goOver2<<<2048,256>>>(counter2, d_hTri, d_TAS, newSize*2);
cudaDeviceSynchronize();
cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,cudaMemcpyDeviceToHost);
res+=findBestTrain(hTri, counter2, curUser.TAS);
}while(newSize>0.1*tasSize);
*/
cudaFree(d_hTri);
cudaFree(d_TAS);
free(TAS);
free(hTri);
cudaDeviceSynchronize();
return res;
}
int main(int argc , char *argv[])
{
int numConfig = 8;
if (argc < numConfig){
printf("wrong command: too few arguments\n");
return 0;
}
int COL=atoi(argv[1]);
int anchors_num = atoi(argv[4]);
int start=atoi(argv[2]);
int end = atoi(argv[3]);
cudaSetDevice(1);
int argStart = 5;
vector<int> anchors;
for(; argStart<5+anchors_num;argStart++){
int cur = atoi(argv[argStart]);
float x = (cur-1)%COL;
float y = (cur-1)/COL;
//cout<<"X:"<<x<<" Y:"<<y<<endl;
anchors.push_back(x);
anchors.push_back(y);
}
cout<<"argStart: "<<argStart<<endl;
User curUser;
short hv[anchorSize][anchorSize];
curUser.trainingExample(anchors);
curUser.getTrainInfo(hv,3);
vector<hyperTrial> hyperTrials=curUser.findTrainHyperTrial(hv);
cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl;
for(int cs=start;cs<end;cs++){
for(int fileIndex = 1; fileIndex<100000; fileIndex++){
string fname = "./train/"+to_string(cs)+"/TAS_"+to_string(fileIndex);
string filename = "./dataExpect/"+to_string(cs)+"/preTrain_"+to_string(fileIndex);
if((!fs::exists(filename)) && fs::exists(fname)){
ifstream network;
network.open(fname);
curUser.clearTAS();
int cur;
string values = "";
int TAS_SIZE=0;
while(network>>cur)
{
TAS_SIZE++;
int x = (cur-1)%COL;
int y = (cur-1)/COL;
values += curUser.addToTAS(x,y);
values += "\n";
}
string TAS_STRING = to_string(TAS_SIZE)+"\n";
TAS_STRING+=values;
network.close();
string res = getRoutingMSG_train(curUser,hyperTrials);
cout<<"cs: "<<cs<<"fileIndex: "<<fileIndex<<endl;
writeTrainToFile(res, cs, fileIndex,TAS_STRING);
}
else{
cout<<fname<<" does not exist"<<endl;
}
}
}
printf("\n");
/*
srand(time(NULL));
User curUser;
int mode;
while(1){
cout << "type in the mode: "<<endl;
cout << "1: create random network"<<endl;
cout << "2: find route msg for specific random network"<<endl;
cin >> mode;
if(mode ==1){
int nn, an , rr;
cout<< "Please type in number of nodes"<<endl;
cin >> nn;
cout<< "please type in maximum number of anchor nodes"<<endl;
cin >> an;
cout<< "please type in the radio range"<<endl;
cin >> rr;
string nid = curUser.randomNetwork(nn,an,rr);
cout<< "created random network: "<<nid<<endl;
}
else if(mode == 2){
string network;
cout<< "Please type in the network ID"<<endl;
cin >> network;
curUser.loadNetwork(network);
short hv[nodeSize][anchorSize];
curUser.getHopInfo(hv);
vector<twoCycleTrial> cycleTrials=curUser.findTwoCycleTrial(hv);
cout<<"number of cycleTrials: "<<cycleTrials.size()<<endl;
vector<ellipseTrial> ellipseTrials=curUser.findEllipseTrial(hv);
cout<<"number of ellipseTrials: "<<ellipseTrials.size()<<endl;
vector<hyperTrial> hyperTrials=curUser.findHyperTrial(hv);
cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl;
for(int i=100; i>95; i--)
{
for(int j=0; j<5; j++)
{
curUser.updateStroke(i,j,50);
cout<<"curUser TAS: "<<curUser.TAS.size()<<endl;
string res = getRoutingMSG_without_ellipse(curUser,cycleTrials,hyperTrials);
writeToFile(res,i,j,0,network);
cout<<i<<"_"<<j<<" result without ellipse: "<<res<<endl;
//string res2 = getRoutingMSG_with_ellipse(curUser,ellipseTrials,hyperTrials);
//writeToFile(res2,i,j,1,network);
//cout<<i<<"_"<<j<<" result with ellipse: "<< res2<<endl;
}
}
}
else{
cout<<" please type 1 or 2"<<endl;
}
}
*/
return 0;
}
|
82fa813c0ef380bcf1964cfd21476b03a20e1b26.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
* Yiming Wang
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/cub.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "k2/csrc/ragged_utils.h"
namespace {
/*
A helper function used in RaggedShape3;
if both first and second are non-NULL, it will check if the context of them
is compatible or not and return that context if compatible;
if one of them is NULL, returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeLayer> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
// The reason for "|| RandInt(0, 2) == 0)" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
// just do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back())
<< "Bad row splits is: " << *row_splits;
}
}
std::vector<RaggedShapeLayer> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
// we need to work out row_splits as we always require row_splits is not
// empty for RaggedShape. Note here we suppose the last element in row_ids
// is num_rows - 1, i.e. there're no empty rows after row `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
// note: the line below will check that row_splits and row_ids are valid and
// agree with each other.
return RaggedShape(axes);
}
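// Illustrative example (hypothetical values): given
//   Array1<int32_t> row_splits(c, std::vector<int32_t>{0, 2, 2, 5});
//   RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1);
// the result is [ [ x x ] [ ] [ x x x ] ] with Dim0() == 3 and
// NumElements() == 5; its row_ids, if materialized later, would be
// [ 0 0 2 2 2 ].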
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
K2_CHECK(IsCompatible(a, b));
const auto &a_axes = a.Layers();
const auto &b_axes = b.Layers();
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
std::vector<RaggedShapeLayer> axes;
axes.reserve(a_size + b_size);
for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]);
for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]);
bool validate = false;
return RaggedShape(axes, validate);
}
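// Illustrative example (hypothetical shapes): if a == [ [ x x ] [ x ] ] and
// b == [ [ x ] [ x x ] [ ] ] (note a.NumElements() == b.Dim0() == 3), then
// ComposeRaggedShapes(a, b) == [ [ [ x ] [ x x ] ] [ [ ] ] ], a 3-axis shape
// whose layer list is a's layers followed by b's layers.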
RaggedShape ComposeRaggedShapes3(const RaggedShape &a, const RaggedShape &b,
const RaggedShape &c) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
if (b.NumElements() != c.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << b.NumElements()
<< " vs. " << c.Dim0();
}
K2_CHECK(IsCompatible(a, b));
K2_CHECK(IsCompatible(b, c));
const auto &a_axes = a.Layers();
const auto &b_axes = b.Layers();
const auto &c_axes = c.Layers();
std::size_t a_size = a_axes.size(), b_size = b_axes.size(),
c_size = c_axes.size();
std::vector<RaggedShapeLayer> axes;
axes.reserve(a_size + b_size + c_size);
for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]);
for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]);
for (std::size_t i = 0; i < c_size; ++i) axes.emplace_back(c_axes[i]);
bool validate = false;
return RaggedShape(axes, validate);
}
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1);
Array1<int32_t> temp_array;
if (row_splits2 == nullptr) {
K2_CHECK_NE(row_ids2, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1);
row_splits2 = &temp_array;
RowIdsToRowSplits(*row_ids2, row_splits2);
}
return ComposeRaggedShapes(
shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2));
}
RaggedShape RaggedShape4(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2,
Array1<int32_t> *row_splits3,
Array1<int32_t> *row_ids3, int32_t cached_tot_size3) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1,
row_splits2, row_ids2, cached_tot_size2);
Array1<int32_t> temp_array;
if (row_splits3 == nullptr) {
K2_CHECK_NE(row_ids3, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array =
Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1);
row_splits3 = &temp_array;
RowIdsToRowSplits(*row_ids3, row_splits3);
}
return ComposeRaggedShapes(
shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3));
}
RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes,
const int32_t *tot_sizes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
int32_t tot_size = 0;
for (int32_t axis = 1; axis < num_axes; ++axis) {
tot_size += tot_sizes[axis - 1] + 1 + tot_sizes[axis];
}
Array1<int32_t> buf(c, tot_size);
int32_t start = 0;
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = buf.Arange(start,
start + tot_sizes[axis - 1] + 1);
start += tot_sizes[axis - 1] + 1;
axes[axis - 1].row_ids = buf.Arange(start, start + tot_sizes[axis]);
start += tot_sizes[axis];
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
// Don't validate here, as we have not yet set the values of row_splits and row_ids
return RaggedShape(axes, false);
}
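// Illustrative example (hypothetical values): with num_axes == 3 and
// tot_sizes == { 2, 5, 9 }, the returned shape has uninitialized row_splits1
// of dim 3, row_ids1 of dim 5, row_splits2 of dim 6 and row_ids2 of dim 9,
// all sub-arrays of a single allocation; the caller is expected to fill them
// in before the shape is used.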
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
// The reason the code is different for axis == 0 is that in that case we are
// really making visible an "implicit" axis of the input `src`; we could call
// it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 to an idx0 and
// from an idx0 to an idx_minus1, where idx_minus1 is always 0 and
// 0 <= idx0 < Dim0().
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeLayer is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeLayer> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem, (int32_t i)->void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
});
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem2,
(int32_t i)->void { mem_data[i] = i % (tot_size + 1); });
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
// Note: the returned array has `num_axes_in + 1` axes, so its
// array of RaggedShapeLayer is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
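// Illustrative example (hypothetical shape): if src == [ [ x x ] [ x ] ],
// then Unsqueeze(src, 0) == [ [ [ x x ] [ x ] ] ] (a new leading axis of
// size 1), while Unsqueeze(src, 1) == [ [ [ x ] [ x ] ] [ [ x ] ] ]
// (each element of axis 1 is wrapped in its own singleton sublist).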
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0) return ans;
ans.reserve(num_srcs);
ContextPtr &c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; ++i) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim) max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; ++i) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeLayer> axes;
axes.reserve(num_axes); // note, the size of the `layers` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Layers().begin(), src[i]->Layers().end());
ans.emplace_back(std::move(axes));
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] new2old Array of indexes into axis 0 of src; elements
equal to -1 will be interpreted as referring to
an empty list.
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
*/
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
// max 5 layers.
RowSplitsAccessor<5> row_splits_acc(src);
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
K2_EVAL(
c, ans_dim0, lambda_set_offsets, (int32_t i)->void {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i],
old_offset_next = old_offset + 1,
offset_diff = 1;
// The following is a special case that interprets -1 as referring to an
// empty list. In this case, old_offset == old_offset_next == 0.
// The specific value 0 is not necessary; they could be equal
// and have any value in [0, src.Dim0() - 1] and still refer to
// the empty list.
if (old_offset == -1)
old_offset = 0;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = offset_diff;
if (axis + 1 == num_axes) return;
old_offset = row_splits_acc(axis)[old_offset];
old_offset_next = row_splits_acc(axis)[old_offset_next];
offset_diff = old_offset_next - old_offset;
}
});
ExclusiveSum(*new_offsets, new_offsets);
}
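// Illustrative example (hypothetical values): suppose
// src == [ [ x x ] [ x ] [ x x x ] ] (row_splits1 == [ 0 2 3 6 ]) and
// new2old == [ 2 0 ].  Then on exit:
//   old_offsets == [ [ 2 0 ]      // axis 0: starting row in src
//                    [ 3 0 ] ]    // axis 1: starting element in src
//   new_offsets == [ [ 0 1 2 ]    // axis 0: offsets into ans
//                    [ 0 3 5 ] ]  // axis 1: offsets into ans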
// Don't make it static to fix the following error on Windows.
// Error : On Windows, the enclosing parent function ("IndexAxis0") for an
// extended __host__ __device__ lambda cannot have internal or no linkage
/*static*/ RaggedShape IndexAxis0(RaggedShape &src,
const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, new2old, &old_offsets, &new_offsets);
// tot_sizes_out is of dimension (num_axes), tot_sizes_out[i] is
// ans.TotSize(i)
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
int32_t *tot_sizes_out_cpu_data = tot_sizes_out.Data();
if (elem_indexes)
*elem_indexes = Array1<int32_t>(c, tot_sizes_out_cpu_data[num_axes - 1]);
RaggedShape ans =
RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out_cpu_data);
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
for (int32_t axis = 1; axis < num_axes; axis++) {
// we are not creating the actual row_ids here, except for axis 1; we are
// creating "composed row_ids" which map to the index on axis 0.
Array1<int32_t> row_ids = ans.RowIds(axis);
RowSplitsToRowIds(new_offsets.Row(axis), &row_ids);
}
ans.Layers()[0].row_splits = new_offsets.Row(1);
// Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1).
RowSplitsAccessor<5> old_row_splits_acc(src),
new_row_splits_acc(ans);
RowIdsAccessor<5> old_row_ids_acc(src),
new_row_ids_acc(ans);
SmallVec<int32_t, 6> tot_sizes;
K2_CHECK(num_axes <= 6);
int32_t max_tot_size = 0;
for (int32_t i = 0; i < num_axes; i++) {
tot_sizes.data[i] = tot_sizes_out_cpu_data[i];
max_tot_size = std::max<int32_t>(max_tot_size,
tot_sizes.data[i]);
}
int32_t *elem_indexes_data = (elem_indexes != nullptr ?
elem_indexes->Data() : nullptr);
// Note, the first row_splits vector was set above, ans.Layers()[0].row_splits
// = new_offsets.Row(1).
auto lambda_set_row_splits_and_ids = [=] __host__ __device__(
int32_t axis, int32_t i) -> void {
axis++; // make it one-based.
int32_t tot_size = tot_sizes(axis); // == new_offsets_acc(axis, ans_dim0);
if (i > tot_size)
return;
int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1);
int32_t ans_idx0 = (i == tot_size ? ans_dim0 :
composed_row_ids_data[i]),
job_begin = new_offsets_acc(axis, ans_idx0),
job_this_idx0 = i - job_begin;
K2_CHECK_GE(job_this_idx0, 0);
int32_t row_split_value = 0, new_next_offset = 0;
if (axis + 1 < num_axes)
new_next_offset = new_offsets_acc(axis + 1, ans_idx0);
if (i < tot_size) {
// "prev" means for axis - 1
int32_t new_prev_offset = new_offsets_acc(axis - 1, ans_idx0),
old_prev_offset = old_offsets_acc(axis - 1, ans_idx0),
old_offset = old_offsets_acc(axis, ans_idx0),
old_idx = old_offset + job_this_idx0;
if (axis != 1) {
// Write row-ids.
// Actually doing this for axis == 1 is harmless, but unnecessary, as it
// would write back the same values that were already there. We avoid
// the memory access.
// this_new_row_ids = new_row_ids_acc(axis - 1);
int32_t *this_new_row_ids = composed_row_ids_data;
const int32_t *this_old_row_ids = old_row_ids_acc(axis - 1);
int32_t old_row_id = this_old_row_ids[old_idx],
new_row_id = old_row_id + new_prev_offset - old_prev_offset;
this_new_row_ids[i] = new_row_id;
}
if (elem_indexes_data != nullptr && axis == num_axes - 1)
elem_indexes_data[i] = old_idx;
if (axis + 1 < num_axes) {
int32_t old_next_offset = old_offsets_acc(axis + 1, ans_idx0),
next_offset_diff = new_next_offset - old_next_offset;
const int32_t *old_row_splits_data = old_row_splits_acc(axis);
row_split_value = next_offset_diff + old_row_splits_data[old_idx];
}
} else {
row_split_value = new_next_offset;
}
if (axis + 1 < num_axes) {
int32_t *new_row_splits_data = new_row_splits_acc(axis);
new_row_splits_data[i] = row_split_value;
}
};
constexpr int32_t cutoff = 50000;
if (c->GetDeviceType() == kCpu) {
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
int32_t this_size = tot_sizes(axis + 1);
for (int32_t i = 0; i <= this_size; i++)
lambda_set_row_splits_and_ids(axis, i);
}
} else if (max_tot_size * (num_axes - 1) < cutoff) {
Eval2Device(c, num_axes - 1, max_tot_size + 1,
lambda_set_row_splits_and_ids);
} else {
// Loop in the kernel rather than submitting an excessive number of threads.
auto lambda_loop = [=] __device__(int32_t i) {
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
lambda_set_row_splits_and_ids(axis, i);
}
};
EvalDevice(c, max_tot_size + 1, lambda_loop);
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
RaggedShape Index(RaggedShape &src, int32_t axis,
const Array1<int32_t> &indexes,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(num_axes));
if (axis == 0) {
return IndexAxis0(src, indexes, elem_indexes);
} else if (axis == src.NumAxes() - 1) {
// This code is related to SubsetRaggedShape(). `indexes` corresponds
// to `new2old`.
Array1<int32_t> last_row_ids = src.RowIds(num_axes - 1)[indexes];
#ifndef NDEBUG
if (!IsMonotonic(last_row_ids)) {
K2_LOG(FATAL) << "Invalid indexes used when indexing RaggedShape";
}
#endif
Array1<int32_t> last_row_splits(last_row_ids.Context(),
src.TotSize(num_axes - 2) + 1);
RowIdsToRowSplits(last_row_ids, &last_row_splits);
if (elem_indexes)
*elem_indexes = indexes;
std::vector<RaggedShapeLayer> axes = src.Layers();
axes.back().row_splits = last_row_splits;
axes.back().row_ids = last_row_ids;
axes.back().cached_tot_size = last_row_ids.Dim();
// TODO: disable checking by changing true to false.
return RaggedShape(axes, true);
} else {
RaggedShape top, bottom;
DecomposeRaggedShape(src, axis, &top, &bottom);
RaggedShape top_indexed = Index(top, axis, indexes, nullptr),
bottom_indexed = IndexAxis0(bottom, indexes, elem_indexes);
return ComposeRaggedShapes(top_indexed, bottom_indexed);
}
}
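// Illustrative example (hypothetical values): if
// src == [ [ x x ] [ x ] [ x x x ] ] and indexes == [ 2 0 2 ] with axis == 0,
// then the result is [ [ x x x ] [ x x ] [ x x x ] ] and, if requested,
// *elem_indexes == [ 3 4 5 0 1 3 4 5 ] (the source position of each element
// of the result).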
// returns array of dim (src[0]->NumAxes() + 1) by (num_srcs + 1),
// see documentation in header.
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr &ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
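// Illustrative example (hypothetical shapes): with num_srcs == 2,
// src[0] == [ [ x x ] [ x ] ] and src[1] == [ [ x ] [ x x x ] ], the returned
// array (3 rows by 3 columns) is
//   [ [ 0 1 2 ]      // one entry per source (for the new axis when stacking)
//     [ 0 2 4 ]      // cumulative Dim0()
//     [ 0 3 7 ] ]    // cumulative TotSize(1)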
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
/*static*/ RaggedShape StackAxis0(int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (num_srcs == 1) {
if (merge_map)
*merge_map =
Arange<uint32_t>(src[0]->Context(), 0, src[0]->NumElements());
RaggedShape top_layer = TrivialShape(src[0]->Context(), src[0]->Dim0());
return ComposeRaggedShapes(top_layer, **src);
}
// We can't handle num_srcs == 0 because we won't have a context object.
K2_CHECK_GT(num_srcs, 1);
int32_t num_axes_in = src[0]->NumAxes(),
num_axes_out = num_axes_in + 1;
ContextPtr c = src[0]->Context();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes_in, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
// Its shape is (num_axes_in + 1 == num_axes_out, num_srcs + 1).
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
SmallVec<int32_t, 6> tot_sizes_out;
K2_CHECK(num_axes_out <= 6);
int32_t max_tot_size = 0;
for (int32_t axis = 0; axis < num_axes_out; axis++) {
tot_sizes_out.data[axis] = offsets_acc(axis, num_srcs);
max_tot_size = std::max<int32_t>(max_tot_size,
tot_sizes_out.data[axis]);
}
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes_out,
tot_sizes_out.data);
// src_row_splits and src_row_ids are of dim num_axes_in-1 by num_srcs.
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor();
for (int32_t axis = 1; axis < num_axes_out; axis++) {
// we are not creating the actual row_ids here, except for axis 1; we are
// creating "composed row_ids" which map to the index on axis 0.
Array1<int32_t> row_ids = ans.RowIds(axis);
RowSplitsToRowIds(offsets.Row(axis), &row_ids);
}
ans.Layers()[0].row_splits = offsets.Row(1);
// Caution: e.g. new_row_splits_acc(i) == ans.RowSplits(i+1).
RowSplitsAccessor<5> new_row_splits_acc(ans);
RowIdsAccessor<5> new_row_ids_acc(ans);
uint32_t *merge_map_data;
if (merge_map != nullptr) {
*merge_map = Array1<uint32_t>(c, tot_sizes_out.data[num_axes_out - 1]);
merge_map_data = merge_map->Data();
} else {
merge_map_data = nullptr;
}
// Note: the first row_splits vector was set above, ans.Layers()[0].row_splits
// = offsets.Row(1).
auto lambda_set_row_splits_and_ids = [=] __host__ __device__(
int32_t axis, int32_t i) -> void {
++axis; // We want this to be called starting with axis == 1, but Eval2
// doesn't support that.
// At this point, 1 <= axis < num_axes_out.
// This kernel will be writing one or both of:
// the row-splits for output-layer==`axis`/input-layer==`axis-1`,
// the row-ids for output-layer=`axis-1`/input-layer==`axis-2`.
int32_t tot_size = tot_sizes_out(axis); // == offsets_acc(axis, num_srcs);
if (i > tot_size)
return;
int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1);
int32_t ans_idx0 =
(i == tot_size
? num_srcs
: composed_row_ids_data[i]), // note: ans_idx0 == src_idx.
job_begin = offsets_acc(axis, ans_idx0), job_this_idx0 = i - job_begin;
K2_CHECK_GE(job_this_idx0, 0);
int32_t row_split_value = 0, new_next_offset = 0;
uint32_t *merge_map_data_local = nullptr;
if (axis + 1 < num_axes_out) {
new_next_offset = offsets_acc(axis + 1, ans_idx0);
} else {
merge_map_data_local = merge_map_data;
}
if (i < tot_size) {
// "prev" means for axis - 1
int32_t new_prev_offset = offsets_acc(axis - 1, ans_idx0);
if (axis != 1) {
// Write row-ids.
// this_new_row_ids = new_row_ids_acc(axis - 1);
int32_t *this_new_row_ids = composed_row_ids_data;
const int32_t *this_src_row_ids = src_row_ids_acc(axis - 2, ans_idx0);
int32_t old_row_id = this_src_row_ids[job_this_idx0],
new_row_id = old_row_id + new_prev_offset;
this_new_row_ids[i] = new_row_id;
}
if (merge_map_data_local != nullptr) {
merge_map_data_local[i] = ans_idx0 + num_srcs * job_this_idx0;
}
if (axis + 1 < num_axes_out) {
const int32_t *src_row_splits_data = src_row_splits_acc(axis - 1,
ans_idx0);
int32_t old_row_split = src_row_splits_data[job_this_idx0];
row_split_value = new_next_offset + old_row_split;
}
} else {
row_split_value = new_next_offset;
}
if (axis + 1 < num_axes_out) {
int32_t *new_row_splits_data = new_row_splits_acc(axis);
new_row_splits_data[i] = row_split_value;
}
};
constexpr int32_t cutoff = 50000;
if (c->GetDeviceType() == kCpu) {
for (int32_t axis = 0; axis < num_axes_out - 1; axis++) {
int32_t this_size = tot_sizes_out(axis + 1);
for (int32_t i = 0; i <= this_size; i++)
lambda_set_row_splits_and_ids(axis, i);
}
} else if (max_tot_size * (num_axes_out - 1) < cutoff) {
Eval2Device(c, num_axes_out - 1, max_tot_size + 1,
lambda_set_row_splits_and_ids);
} else {
// Loop in the kernel rather than submitting an excessive number of threads.
auto lambda_loop = [=] __device__(int32_t i) {
for (int32_t axis = 0; axis < num_axes_out - 1; axis++) {
lambda_set_row_splits_and_ids(axis, i);
}
};
EvalDevice(c, max_tot_size + 1, lambda_loop);
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
RaggedShape Cat(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
if (axis == 0) {
RaggedShape temp = StackAxis0(num_srcs, src, merge_map);
std::vector<RaggedShapeLayer> ans_layers(
temp.Layers().begin() + 1, temp.Layers().end());
return RaggedShape(ans_layers, false);
}
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes - 1);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m =
(axis + 1 == num_axes ? merge_map : &merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m),
t = SubsampleRaggedLayer(s, 0, num_srcs);
ans_layers[axis - 1] = t.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m =
(l + 2 == num_axes ? merge_map : &merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
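// Illustrative example (hypothetical shapes): with src[0] == [ [ x x ] [ x ] ]
// and src[1] == [ [ x x x ] ], Cat(0, 2, src) == [ [ x x ] [ x ] [ x x x ] ];
// with src[0] == [ [ x x ] [ x ] ] and src[1] == [ [ x ] [ x x ] ],
// Cat(1, 2, src) == [ [ x x x ] [ x x x ] ] (corresponding rows are appended).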
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
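// Illustrative example (hypothetical shape): if
// src == [ [ [ x ] [ x x ] ] [ [ x x x ] ] ], then
//   RemoveAxis(src, 0) == [ [ x ] [ x x ] [ x x x ] ]
//   RemoveAxis(src, 1) == [ [ x x x ] [ x x x ] ]
//   RemoveAxis(src, 2) == [ [ x x ] [ x ] ]
// (removing the last axis turns the former sublists into elements).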
RaggedShape MakeTransposable(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeLayer &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
K2_EVAL(
c, ans_tot_size1, lambda_set_row_ids1,
(int32_t i)->void { row_ids1_data[i] = i / max_size; });
}
if (num_axes > 2) {
RaggedShapeLayer &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
K2_EVAL(
c, ans_tot_size1 + 1, lambda_set_row_splits2,
(int32_t idx01)->void {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] =
src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
});
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
K2_EVAL(
c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
});
}
}
}
// copy the remaining row_splits and row_ids;
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
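// Illustrative example (hypothetical shape): if
// src == [ [ [ x ] [ x x ] ] [ [ x x x ] ] ], then MaxSize(1) == 2 and
// MakeTransposable(src) == [ [ [ x ] [ x x ] ] [ [ x x x ] [ ] ] ]: every
// axis-0 row is padded with empty sublists up to the same length.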
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) {
if (value_indexes) *value_indexes = Array1<int32_t>(c, 0);
return src;
}
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
// `renumbering` is a `new2old` map that maps an axis-0 index of
// src_no_axis0_renumbered to the corresponding axis-0 index of
// src_no_axis0.
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
K2_EVAL(
c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
});
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, 0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeLayer> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
});
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
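// Illustrative example (hypothetical shape): if
// src == [ [ [ x ] [ x x ] ] [ [ x x x ] [ ] ] ] (every axis-0 row has 2
// sublists), then Transpose(src) == [ [ [ x ] [ x x x ] ] [ [ x x ] [ ] ] ],
// i.e. result[i][j] == src[j][i].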
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* = nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
ContextPtr c = src[0]->Context();
if (axis == 0) {
return StackAxis0(num_srcs, src, merge_map);
}
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m =
(axis + 1 == num_axes ? merge_map : &merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m);
// note: s.Dim0() will be a multiple of num_srcs.
ans_layers[axis - 1] =
RegularRaggedShape(c, s.Dim0() / num_srcs, num_srcs).Layers()[0];
ans_layers[axis] = s.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m =
(l + 2 == num_axes ? merge_map : &merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l + 1] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
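// Illustrative example (hypothetical shapes): with src[0] == [ [ x x ] [ x ] ]
// and src[1] == [ [ x x x ] ], Stack(0, 2, src) ==
// [ [ [ x x ] [ x ] ] [ [ x x x ] ] ]: a new leading axis of size num_srcs,
// in contrast to Cat(0, ...) which concatenates along axis 0 without adding
// an axis.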
/*
Select a ragged tensor's shape on axis 0 with a two-axis ragged index.
@param [in] src Source RaggedShape to select.
@param [in] indexes A ragged tensor with **TWO** axes containing the indexes
into axis 0 of src. We also support -1 as an index,
which results in an empty list (as if it were the
index of a position in `src` that had an empty list),
i.e. we require `-1 <= indexes[i] < src.TotSize(0)`.
@param [out] out The container to which the output RaggedShapes will be
written; MUST NOT be a nullptr. It will be resized, and the
final size of `out` will equal `indexes.Dim0()`.
Note: the `NumAxes()` of each output RaggedShape is the same
as the `NumAxes()` of src.
@param [out] split_map If not nullptr, will store the element indexes within
src telling where the elements of each split RaggedShape
come from. It will be resized, and the final size of
`split_map` will equal `indexes.Dim0()`.
For example, suppose indexes is `[ [ 0 3 5 ] [ 1 2 4 ] [ 6 -1 ] ]`; this means
we will select elements 0, 3, 5 of src's axis 0 to construct the first output
RaggedShape, elements 1, 2, 4 to construct the second, and element 6 plus an
empty list to construct the third.
*/
/*static*/ void SelectAxis0(RaggedShape &src, const Ragged<int32_t> &indexes,
std::vector<RaggedShape> *out, std::vector<Array1<int32_t>> *split_map) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
K2_CHECK(IsCompatible(src, indexes));
K2_CHECK_EQ(indexes.NumAxes(), 2);
K2_CHECK(out != nullptr);
int32_t num_axes = src.NumAxes(),
out_size = indexes.Dim0(),
tot_elems = indexes.NumElements();
if (out_size == 0) {
*out = std::vector<RaggedShape>();
if (split_map) {
*split_map = std::vector<Array1<int32_t>>();
}
return;
}
Array2<int32_t> old_offsets, // num_axes by tot_elems
new_offsets; // num_axes by (tot_elems + 1).
GetOldAndNewOffsets(src, indexes.values, &old_offsets, &new_offsets);
const int32_t *indexes_row_split1_data = indexes.RowSplits(1).Data(),
*indexes_row_ids1_data = indexes.RowIds(1).Data();
// Contains the `TotSize` of each axis of each output RaggedShape
Array2<int32_t> tot_sizes(c, out_size, num_axes);
Array2Accessor<int32_t> tot_sizes_acc = tot_sizes.Accessor();
Array2Accessor<int32_t> new_offsets_acc = new_offsets.Accessor();
K2_EVAL2(c, out_size, num_axes, lambda_set_tot_sizes,
(int32_t i, int32_t j) -> void {
int32_t idx0 = indexes_row_split1_data[i],
idx0_next = indexes_row_split1_data[i + 1];
tot_sizes_acc(i, j) =
new_offsets_acc(j, idx0_next) - new_offsets_acc(j, idx0);
});
auto tot_sizes_cpu = tot_sizes.To(GetCpuContext());
auto tot_sizes_cpu_acc = tot_sizes_cpu.Accessor();
out->resize(out_size);
if (split_map != nullptr) split_map->resize(out_size);
// We cannot avoid this for-loop over dim0, as we want to allocate memory
// separately; we may consider using a ThreadPool later.
for (int32_t i = 0; i < out_size; ++i) {
out->at(i) = RaggedShapeFromTotSizes(c,
num_axes, tot_sizes_cpu.Row(i).Data());
if (split_map != nullptr) {
split_map->at(i) =
Array1<int32_t>(c, tot_sizes_cpu_acc(i, num_axes - 1));
}
}
// Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1).
RowSplitsAccessor<5> old_row_splits_acc(src);
RowIdsAccessor<5> old_row_ids_acc(src);
auto old_offsets_acc = old_offsets.Accessor();
// axes_elems contains the number of elements on each axis before splitting into
// different RaggedShapes; it should equal the column sums of `tot_sizes` above.
Array1<int32_t> axes_elems =
Array1<int32_t>(new_offsets.Col(tot_elems)).To(GetCpuContext());
for (int32_t axis = 0; axis < num_axes; axis++) {
// Contains the RowSplits & RowIds pointers for the current layer; it has
// dimension out_size * 2, with layout splits_pointer0, ids_pointer0,
// splits_pointer1, ids_pointer1, ...
Array1<int32_t *> splits_ids_ptr(GetCpuContext(), out_size * 2);
int32_t **splits_ids_ptr_data = splits_ids_ptr.Data();
// Contains the pointers for split_map
Array1<int32_t *> split_map_ptr;
int32_t **split_map_ptr_data = nullptr;
if (axis == num_axes - 1 && split_map != nullptr) {
split_map_ptr = Array1<int32_t *>(GetCpuContext(), out_size);
split_map_ptr_data = split_map_ptr.Data();
}
for (int32_t i = 0; i < out_size; ++i) {
splits_ids_ptr_data[2 * i] = axis == num_axes - 1 ? nullptr :
out->at(i).RowSplits(axis + 1).Data();
splits_ids_ptr_data[2 * i + 1] =
axis == 0 ? nullptr : out->at(i).RowIds(axis).Data();
if (axis == num_axes - 1 && split_map != nullptr) {
split_map_ptr_data[i] = split_map->at(i).Data();
}
}
// transfer to GPU if we're using a GPU
splits_ids_ptr = splits_ids_ptr.To(c);
splits_ids_ptr_data = splits_ids_ptr.Data();
// set row_splits1
if (axis == 0) {
K2_EVAL(c, tot_elems, lambda_set_row_split1, (int32_t idx01) {
int32_t index_idx0 = indexes_row_ids1_data[idx01],
idx0x = indexes_row_split1_data[index_idx0];
splits_ids_ptr_data[2 * index_idx0][idx01 - idx0x]
= new_offsets_acc(axis + 1, idx01) -
new_offsets_acc(axis + 1, idx0x);
// Set the last elements of row_splits1 of each output shape
if (idx01 == tot_elems - 1 ||
index_idx0 != indexes_row_ids1_data[idx01 + 1]) {
splits_ids_ptr_data[2 * index_idx0][idx01 - idx0x + 1]
= new_offsets_acc(axis + 1, idx01 + 1) -
new_offsets_acc(axis + 1, idx0x);
}
});
continue;
}
// set last element of each row_splits
// TODO: Integrate this kernel into the kernel below.
if (axis < num_axes - 1) {
K2_EVAL(c, out_size, lambda_set_last_row_splits, (int32_t idx0) {
int32_t idx0x = indexes_row_split1_data[idx0],
idx0x_next = indexes_row_split1_data[idx0 + 1],
value = new_offsets_acc(axis + 1, idx0x_next) -
new_offsets_acc(axis + 1, idx0x),
pos = tot_sizes_acc(idx0, axis);
splits_ids_ptr_data[2 * idx0][pos] = value;
});
}
if (axis == num_axes - 1 && split_map != nullptr) {
split_map_ptr = split_map_ptr.To(c);
split_map_ptr_data = split_map_ptr.Data();
}
int32_t num_elems = axes_elems[axis];
// composed_row_ids maps current idx to idx01 of indexes
Array1<int32_t> composed_row_ids(c, num_elems);
RowSplitsToRowIds(new_offsets.Row(axis), &composed_row_ids);
const int32_t *composed_row_ids_data = composed_row_ids.Data();
K2_EVAL(c, num_elems, lambda_set_row_splits_and_ids, (int32_t i) {
// tot_elems = indexes.NumElements(), so tot_idx0 can be interpreted as
// index_idx01
int32_t tot_idx0 = composed_row_ids_data[i],
index_idx0 = indexes_row_ids1_data[tot_idx0],
index_idx0x = indexes_row_split1_data[index_idx0],
begin_base = new_offsets_acc(axis, index_idx0x),
begin = new_offsets_acc(axis, tot_idx0),
this_idx0 = i - begin,
this_idx01 = i - begin_base;
K2_CHECK_GE(this_idx0, 0);
K2_CHECK_GE(this_idx01, 0);
// "prev" means for axis - 1
int32_t new_prev_offset = new_offsets_acc(axis - 1, tot_idx0),
old_prev_offset = old_offsets_acc(axis - 1, tot_idx0),
old_offset = old_offsets_acc(axis, tot_idx0),
old_idx = old_offset + this_idx0;
if (split_map != nullptr && axis == num_axes - 1)
split_map_ptr_data[index_idx0][this_idx01] = old_idx;
// set row ids
const int32_t *this_old_row_ids = old_row_ids_acc(axis - 1);
int32_t old_row_id = this_old_row_ids[old_idx],
new_row_id = old_row_id + new_prev_offset - old_prev_offset,
new_pre_offset_idx0x = new_offsets_acc(axis - 1, index_idx0x);
splits_ids_ptr_data[2 * index_idx0 + 1][this_idx01] =
new_row_id - new_pre_offset_idx0x;
// set row splits
if (axis + 1 < num_axes) {
int32_t new_next_offset = new_offsets_acc(axis + 1, tot_idx0),
old_next_offset = old_offsets_acc(axis + 1, tot_idx0),
next_offset_diff = new_next_offset - old_next_offset;
const int32_t *old_row_splits_data = old_row_splits_acc(axis);
int32_t row_split_value =
next_offset_diff + old_row_splits_data[old_idx],
new_next_offset_idx0x = new_offsets_acc(axis + 1, index_idx0x);
splits_ids_ptr_data[2 * index_idx0][this_idx01]
= row_split_value - new_next_offset_idx0x;
}
});
}
}
void Unstack(RaggedShape &src, int32_t axis, bool pad_right,
std::vector<RaggedShape> *out,
std::vector<Array1<int32_t>> *split_map) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
if (axis == 0) {
if (src.NumAxes() == 2) {
auto new_src = ComposeRaggedShapes(
TrivialShape(c, src.TotSize(0)), src);
return Unstack(new_src, 1, pad_right, out, split_map);
}
auto indexes = Ragged<int32_t>(RegularRaggedShape(c, src.Dim0(), 1),
Arange(c, 0, src.Dim0()));
SelectAxis0(src, indexes, out, split_map);
for (size_t i = 0; i < out->size(); ++i) {
out->at(i) = RemoveAxis(out->at(i), 0);
}
} else {
int32_t tot_size_axis_minus1 = src.TotSize(axis - 1),
tot_size_axis = src.TotSize(axis);
const int32_t *row_splits_axis = src.RowSplits(axis).Data(),
*row_ids_axis = src.RowIds(axis).Data();
// Each sublist contains elements of axis `axis`; the unstack operation will
// split the elements of each sublist across different RaggedShapes, so the
// number of output RaggedShapes is the size of the sublist with the most
// elements.
int32_t num_out = src.MaxSize(axis);
out->resize(num_out);
if (split_map != nullptr) split_map->resize(num_out);
// We will select the elements of axis `axis` from each sublist; the number
// of sublists equals `src.TotSize(axis - 1)`.
// Initialize with -1 here, because not all the sublists have the same size;
// a -1 means that we don't select anything at that position.
Array1<int32_t> indexes(c, num_out * tot_size_axis_minus1, -1);
int32_t *indexes_data = indexes.Data();
// Decide which output RaggedShape each element of axis `axis` will go to
K2_EVAL(c, tot_size_axis, lambda_set_indexes, (int32_t idx01) {
int32_t idx0 = row_ids_axis[idx01],
idx0x = row_splits_axis[idx0],
idx1 = idx01 - idx0x,
idx_row = idx1;
if (!pad_right) {
int32_t idx0x_next = row_splits_axis[idx0 + 1],
num_elems = idx0x_next - idx0x;
idx_row = num_out - num_elems + idx1;
}
indexes_data[idx_row * tot_size_axis_minus1 + idx0] = idx01;
});
// To make `DecomposeRaggedShape` work, we add a RegularRaggedShape
// layer after axis `axis` if axis equals `src.NumAxes() - 1`.
// Of course, we have to remove the added layer at the end.
bool remove_last_axis = false;
if (axis == src.NumAxes() - 1) {
src = ComposeRaggedShapes(src,
RegularRaggedShape(c, src.NumElements(), 1));
remove_last_axis = true;
}
RaggedShape top, bottom;
DecomposeRaggedShape(src, axis, &top, &bottom);
// Unstack will remove the current axis (the last axis of `top` after
// decomposing on axis); to make `RemoveAxis` work, we add a TrivialShape
// layer before axis 0, and finally we will remove the added layer.
bool remove_axis0 = false;
if (top.NumAxes() == 2) {
top = ComposeRaggedShapes(
TrivialShape(c, top.TotSize(0)), top);
remove_axis0 = true;
}
top = RemoveAxis(top, top.NumAxes() - 1);
auto ragged_indexes = Ragged<int32_t>(RegularRaggedShape(c,
num_out, tot_size_axis_minus1), indexes);
// Select elements according to indexes into corresponding RaggedShape
SelectAxis0(bottom, ragged_indexes, out, split_map);
for (int32_t i = 0; i < num_out; ++i) {
out->at(i) = ComposeRaggedShapes(top, out->at(i));
if (remove_axis0 && !remove_last_axis)
out->at(i) = RemoveAxis(out->at(i), 0);
if (remove_last_axis) {
out->at(i) = RemoveEmptyLists(out->at(i), out->at(i).NumAxes() - 2);
out->at(i) = RemoveAxis(out->at(i), out->at(i).NumAxes() - 1);
}
}
}
}
void Unstack(RaggedShape &src, int32_t axis, std::vector<RaggedShape> *out,
std::vector<Array1<int32_t>> *split_map /*= nullptr*/) {
Unstack(src, axis, true/*pad_right*/, out, split_map);
}
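// Illustrative example (hypothetical shape): if
// src == [ [ [ x ] [ x x ] ] [ [ x x x ] ] ], then Unstack(src, 0, &out)
// gives out[0] == [ [ x ] [ x x ] ] and out[1] == [ [ x x x ] ]; with a
// non-null split_map, (*split_map)[0] == [ 0 1 2 ] and
// (*split_map)[1] == [ 3 4 5 ] (the source element index of each output
// element).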
RaggedShape Merge(int32_t num_srcs, RaggedShape **src,
const Array1<uint32_t> &merge_map,
Array1<uint32_t> *merge_map_out) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(num_srcs > 0);
int32_t num_layers = src[0]->NumAxes() - 1;
std::vector<RaggedShapeLayer> ans_layers(num_layers);
// Note: this is a shallow copy.
Array1<uint32_t> merge_map_local = merge_map;
for (int32_t l = 0; l < num_layers; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m =
(l + 1 == num_layers ? merge_map_out : &merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
NVTX_RANGE(K2_FUNC);
// row_splits = [ 0 num_elems ], row_ids = [ 0 0 .. 0 ] (num_elems zeros).
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
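// Illustrative example (hypothetical num_elems): TrivialShape(c, 3) ==
// [ [ x x x ] ], with row_splits == [ 0 3 ] and row_ids == [ 0 0 0 ].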
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
NVTX_RANGE(K2_FUNC);
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
K2_EVAL2(
c, dim0, dim1, lambda_set_row_ids,
(int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; });
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
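// Illustrative example (hypothetical dims): RegularRaggedShape(c, 2, 3) ==
// [ [ x x x ] [ x x x ] ], with row_splits == [ 0 3 6 ] and
// row_ids == [ 0 0 0 1 1 1 ].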
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
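// Illustrative example (hypothetical values, assuming GetCounts(values, n)
// counts how often each value in [0, n) occurs in `values`): if
// src.values == [ 0 1 1 3 0 ] and ans_ragged_shape == [ [ x x ] [ x x ] ]
// (so n == 4), the result is [ [ 2 2 ] [ 0 1 ] ].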
/*static*/ Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
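// Illustrative example (hypothetical values): if src.values == [ 3 0 2 0 ]
// and num_cols == 4, the buckets are col 0 -> {1, 3}, col 2 -> {2},
// col 3 -> {0}, so the returned reordering is [ 1 3 2 0 ].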
#ifndef _MSC_VER
/*static*/ Array1<int32_t> GetTransposeReorderingThreeAxesCuda(
Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// at this point, a_idx012 and b_idx012 belong to the same column;
// then we sort by their row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim() - 1, // num_segments
lambda_comp, *mgpu_context));
return ans;
}
#endif
/*
// Checks the result of GetTransposeReordering() in debug mode and dies if it is wrong.
static void CheckGetTransposeReordering(Ragged<int32_t> &src,
Array1<int32_t> &ans) {
if (!internal::kDisableDebug && !internal::DisableChecks()) {
K2_CHECK(IsPermutation(ans));
K2_CHECK(IsMonotonic(src.values[ans]));
}
}*/
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &context = src.Context();
if (src.NumAxes() < 2 || src.values.Dim() == 0) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
#ifdef _MSC_VER
// See https://github.com/k2-fsa/k2/pull/753
// and
// https://github.com/k2-fsa/k2/pull/571
int32_t num_buckets = num_cols;
int32_t num_elements = src.values.Dim();
int32_t log_buckets = static_cast<int32_t>(ceilf(log2f(num_buckets)));
Array1<int32_t> ans = Range(context, num_elements, 0);
hipStream_t stream = context->GetCudaStream();
size_t temp_storage_bytes = 0;
K2_CUDA_SAFE_CALL(hipcub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes, src.values.Data(),
static_cast<int32_t *>(nullptr), ans.Data(), ans.Data(), num_elements, 0,
log_buckets, stream));
Array1<int8_t> d_temp_storage(
context, temp_storage_bytes + num_elements * sizeof(int32_t));
K2_CUDA_SAFE_CALL(hipcub::DeviceRadixSort::SortPairs(
d_temp_storage.Data() + sizeof(int32_t) * num_elements,
temp_storage_bytes, src.values.Data(),
reinterpret_cast<int32_t *>(d_temp_storage.Data()), ans.Data(),
ans.Data(), num_elements, 0, log_buckets, stream));
return ans;
#else
(void)GetTransposeReorderingThreeAxesCuda; // remove compiler warnings
#if __CUDACC_VER_MAJOR__ > 10 || \
(__CUDACC_VER_MAJOR__ == 10 && \
(__CUDACC_VER_MINOR__ > 1 || \
(__CUDACC_VER_MINOR__ == 1 && __CUDACC_VER_BUILD__ > 105)))
// Enable it only for NVCC > 10.1.105
//
// Refer to https://github.com/LLNL/axom/issues/88
// NVCC 10.1.105 has a known issue with hipcub::DeviceRadixSort
int32_t num_buckets = num_cols;
int32_t num_elements = src.values.Dim();
int32_t log_buckets = static_cast<int32_t>(ceilf(log2f(num_buckets)));
Array1<int32_t> order = Range(context, num_elements, 0);
Array1<int32_t> src_tmp_out(context, num_elements);
Array1<int32_t> ans(context, num_elements);
hipStream_t stream = context->GetCudaStream();
size_t temp_storage_bytes = 0;
K2_CUDA_SAFE_CALL(hipcub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes, src.values.Data(), src_tmp_out.Data(),
order.Data(), ans.Data(), num_elements, 0, log_buckets, stream));
Array1<int8_t> d_temp_storage(context, temp_storage_bytes);
K2_CUDA_SAFE_CALL(hipcub::DeviceRadixSort::SortPairs(
d_temp_storage.Data(), temp_storage_bytes, src.values.Data(),
src_tmp_out.Data(), order.Data(), ans.Data(), num_elements, 0,
log_buckets, stream));
// CheckGetTransposeReordering(src, ans);
return ans;
#else // __CUDACC_VER_MAJOR__
if (src.NumAxes() == 3) {
Array1<int32_t> ans = GetTransposeReorderingThreeAxesCuda(src, num_cols);
// CheckGetTransposeReordering(src, ans);
return ans;
}
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
// CheckGetTransposeReordering(src, ans);
return ans;
#endif
#endif // _MSC_VER
}
RaggedShape ChangeSublistSize(const RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void {
row_splits_data[idx0] =
src_row_splits_data[idx0] + size_delta * idx0;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
// it's only necessary to guard the next statement with an 'if'
// because size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
});
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
K2_EVAL(
c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void {
int32_t idx0 = i / size_delta, n = i % size_delta,
next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
});
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
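// Illustrative example (hypothetical shape, assuming every row keeps a
// non-negative size): if src == [ [ x x ] [ x x x ] ], then
// ChangeSublistSize(src, 1) == [ [ x x x ] [ x x x x ] ]
// (row_splits [ 0 2 5 ] -> [ 0 3 7 ]) and ChangeSublistSize(src, -1) ==
// [ [ x ] [ x x ] ] (row_splits -> [ 0 1 3 ]).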
// TODO(dan): this could definitely be made more efficient.
RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1);
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data();
K2_EVAL(
c, num_rows, lambda_set_row_sizes, (int32_t idx0)->void {
int32_t orig_size =
src_row_splits_data[idx0 + 1] - src_row_splits_data[idx0],
size;
if (orig_size == 0 || orig_size + size_delta <= 0)
size = 0;
else
size = orig_size + size_delta;
row_splits_data[idx0] = size;
});
ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits);
ans_axes.back().row_ids =
Array1<int32_t>(c, ans_axes.back().row_splits.Back());
RowSplitsToRowIds(ans_axes.back().row_splits, &ans_axes.back().row_ids);
ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim();
return RaggedShape(ans_axes);
}
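// Illustrative example (hypothetical sizes): unlike ChangeSublistSize(),
// rows that are empty, or would become empty, are pinned at size 0; e.g. if
// the rows of src have sizes [ 2 0 3 ] and size_delta == -2, the rows of the
// result have sizes [ 0 0 1 ].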
RaggedShape Prefix(RaggedShape &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
int32_t dim0 = src.Dim0();
K2_CHECK(n >= 0 && n <= dim0);
src.Populate();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = n;
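  // Sketch with made-up values: for a 2-axis src with row_splits [ 0 2 3 6 ]
  // (i.e. [ [x x] [x] [x x x] ]) and n == 2, the loop below keeps
  // row_splits [ 0 2 3 ], updates row_end to 3 and keeps row_ids [ 0 0 1 ],
  // giving the prefix [ [x x] [x] ].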
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1);
// notice here we may do a memory copy from GPU to CPU.
row_end = axes_in[axis].row_splits[row_end];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
return RaggedShape(axes_out);
}
std::vector<RaggedShape> GetPrefixes(RaggedShape &src,
const std::vector<int32_t> &sizes) {
NVTX_RANGE(K2_FUNC);
src.Populate();
int32_t dim0 = src.Dim0();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
ContextPtr &c = src.Context();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
// get those row_end elements at each axis.
int32_t ans_size = static_cast<int32_t>(sizes.size());
Array1<int32_t> row_ends(c, num_axes * ans_size);
Array1<int32_t> sizes_array(GetCpuContext(), sizes);
Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
indexes.CopyFrom(sizes_array);
for (int32_t axis = 1; axis < num_axes; ++axis) {
Array1<int32_t> curr_axis_row_ends =
row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
indexes = curr_axis_row_ends;
}
row_ends = row_ends.To(GetCpuContext());
std::vector<RaggedShape> ans(ans_size);
for (int32_t i = 0; i != ans_size; ++i) {
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = row_ends[i];
K2_CHECK(row_end >= 0 && row_end <= dim0);
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits =
axes_in[axis].row_splits.Arange(0, row_end + 1);
row_end = row_ends[i + (axis + 1) * ans_size];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
ans[i] = RaggedShape(axes_out, false);
}
return ans;
}
RaggedShape Arange(RaggedShape &src, int32_t axis, int32_t begin, int32_t end,
std::pair<int32_t, int32_t> *value_range /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK(axis >= 0 && axis < num_axes - 1);
K2_CHECK(begin >= 0 && begin <= end && end <= src.TotSize(axis));
if (begin == end) {
RaggedShape ans = EmptyRaggedShape(src.Context(), num_axes - axis);
    // as begin == end, the user always gets empty values when doing
    // `src.values.Arange(begin, end)`
if (value_range != nullptr) *value_range = std::make_pair(begin, end);
return ans;
}
src.Populate();
ContextPtr &c = src.Context();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
int32_t ans_num_axes = num_axes - axis;
  // `-1` because Layers().size() is NumAxes() - 1
std::vector<RaggedShapeLayer> axes_out(ans_num_axes - 1);
  // get the `row_begin` and `row_end` indexes for all axes in a kernel so we
  // can do just one GPU to CPU memory transfer.
  // the format of `indexes` is: row_begin_axis0, row_end_axis0,
  // row_begin_axis1, row_end_axis1, etc.; axis0, axis1 here are the axes of ans.
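  // For instance, for a 3-axis src with axis == 0, `indexes` will end up
  // holding [ begin, end, RowSplits(1)[begin], RowSplits(1)[end],
  //           RowSplits(2)[RowSplits(1)[begin]], RowSplits(2)[RowSplits(1)[end]] ].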
Array1<int32_t> indexes(c, ans_num_axes * 2);
int32_t *indexes_data = indexes.Data();
RowSplitsAccessor<5> src_row_splits_acc(src);
K2_EVAL(
c, 1, lambda_set_indexes, (int32_t i)->void {
// we just start a kernel with only one element here.
K2_CHECK_EQ(i, 0);
int32_t row_begin = begin, row_end = end;
indexes_data[0] = row_begin, indexes_data[1] = row_end;
for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) {
row_begin = src_row_splits_acc(cur_axis)[row_begin];
row_end = src_row_splits_acc(cur_axis)[row_end];
int32_t indexes_pos = ((cur_axis - axis) + 1) * 2;
indexes_data[indexes_pos] = row_begin;
indexes_data[indexes_pos + 1] = row_end;
}
});
indexes = indexes.To(GetCpuContext());
int32_t row_begin = indexes[0], row_end = indexes[1];
for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) {
axes_out[cur_axis - axis].row_splits =
axes_in[cur_axis].row_splits.Arange(row_begin, row_end + 1);
int32_t row_id = row_begin;
int32_t indexes_pos = ((cur_axis - axis) + 1) * 2;
row_begin = indexes[indexes_pos];
row_end = indexes[indexes_pos + 1];
axes_out[cur_axis - axis].row_splits =
Minus(axes_out[cur_axis - axis].row_splits, row_begin);
axes_out[cur_axis - axis].row_ids =
axes_in[cur_axis].row_ids.Arange(row_begin, row_end);
axes_out[cur_axis - axis].row_ids =
Minus(axes_out[cur_axis - axis].row_ids, row_id);
axes_out[cur_axis - axis].cached_tot_size = row_end - row_begin;
}
if (value_range != nullptr) *value_range = std::make_pair(row_begin, row_end);
return RaggedShape(axes_out);
}
Ragged<int32_t> AddSuffixToRagged(const Ragged<int32_t> &src,
const Array1<int32_t> &suffix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(suffix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + suffix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
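  // Illustrative example (made-up values): if src is [ [1 2] [3] ] and suffix
  // is [ 10 20 ], then dst_shape is [ [x x x] [x x] ] and the kernel below
  // fills dst_values so that the result is [ [1 2 10] [3 20] ].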
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*suffix_data = suffix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0 + 1] - 1) {
// idx01 points to the last element of this row; copy from suffix
dst_values_data[idx01] = suffix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01];
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
Ragged<int32_t> AddPrefixToRagged(const Ragged<int32_t> &src,
const Array1<int32_t> &prefix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(prefix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + prefix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
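  // As in AddSuffixToRagged, an illustrative example (made-up values): if src
  // is [ [1 2] [3] ] and prefix is [ 10 20 ], the result is [ [10 1 2] [20 3] ].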
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*prefix_data = prefix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0]) {
// idx01 points to the first element of this row; copy from prefix
dst_values_data[idx01] = prefix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01] - 1;
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
RaggedShape SubsetRaggedShape(RaggedShape &src, Renumbering &renumbering,
int32_t axis, Array1<int32_t> *elems_new2old) {
NVTX_RANGE(K2_FUNC);
axis = axis < 0 ? src.NumAxes() + axis : axis;
K2_CHECK_EQ(renumbering.NumOldElems(), src.TotSize(axis));
return Index(src, axis, renumbering.New2Old(), elems_new2old);
}
RaggedShape SubsetRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeLayer> axes = src.Layers();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeLayer &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
  // The variable names below use this 3-axis assumption but the
  // code will work for a greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2,
(int32_t new_idx01)->void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
            // row_splits2 maps from idx01 -> idx012.  Map both indexes: the
            // idx01 (the position we write to) and the idx012 (the value we
            // write) are converted to the new numbering.
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
});
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
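  // Each layer thus represents an empty list of lists: row_splits == [ 0 ],
  // row_ids == [ ]; e.g. for num_axes == 3 the returned shape is "[ ]" with
  // Dim0() == 0 and NumElements() == 0.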
for (int32_t a = 1; a + 1 < num_axes; ++a) axes[a] = axes[0];
return RaggedShape(axes);
}
Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = shape.Context();
Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1));
Array1<int32_t> index_map;
Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map);
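  // For example (made-up sizes, and assuming Sort() fills index_map with the
  // original positions of the sorted elements): if the row sizes were
  // [ 3 1 2 ], `sizes` becomes [ 3 2 1 ] and index_map is [ 0 2 1 ], i.e.
  // index_map[i] is the original index of the i'th longest row.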
return index_map;
}
RaggedShape GetLayer(const RaggedShape &src, int32_t layer) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(layer, 0);
K2_CHECK_LT(layer, src.NumAxes() - 1);
std::vector<RaggedShapeLayer> layers;
layers.push_back(src.Layers()[layer]);
bool check = false;
return RaggedShape(layers, check);
}
void DecomposeRaggedShape(const RaggedShape &src, int32_t axis,
RaggedShape *top, RaggedShape *bottom) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, src.NumAxes() - 1);
const std::vector<RaggedShapeLayer> &src_layers = src.Layers();
std::vector<RaggedShapeLayer> top_layers(axis),
bottom_layers(src_layers.size() - axis);
int32_t src_size = static_cast<int32_t>(src_layers.size());
for (int32_t i = 0; i < axis; ++i) top_layers[i] = src_layers[i];
for (int32_t i = axis; i < src_size; ++i)
bottom_layers[i - axis] = src_layers[i];
*top = RaggedShape(top_layers);
*bottom = RaggedShape(bottom_layers);
}
RaggedShape RemoveEmptyLists(RaggedShape &src_shape, int32_t axis,
Renumbering *renumbering_out) {
NVTX_RANGE(K2_FUNC);
if (axis == 0) {
return RemoveEmptyListsAxis0(src_shape, renumbering_out);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
Renumbering r_temp;
if (!renumbering_out) renumbering_out = &r_temp;
bottom_shape = RemoveEmptyListsAxis0(bottom_shape, renumbering_out);
top_shape = SubsetRaggedShape(top_shape, *renumbering_out);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveSomeEmptyLists(RaggedShape &src_shape, int32_t axis,
Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
if (axis == 0) {
return RenumberAxis0Simple(src_shape, renumbering);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
bottom_shape = RenumberAxis0Simple(bottom_shape, renumbering);
top_shape = SubsetRaggedShape(top_shape, renumbering);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveEmptyListsAxis0(RaggedShape &src_shape,
Renumbering *renumbering_out) {
NVTX_RANGE(K2_FUNC);
Renumbering r_temp;
if (!renumbering_out) renumbering_out = &r_temp;
ContextPtr &c = src_shape.Context();
int32_t num_lists = src_shape.Dim0();
*renumbering_out = Renumbering(c, num_lists);
const int32_t *row_splits_data = src_shape.RowSplits(1).Data();
char *keep_data = renumbering_out->Keep().Data();
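  // Sketch with a made-up shape: for src_shape == [ [x] [ ] [x x] ] the kernel
  // below sets keep == [ 1 0 1 ], so the returned shape is [ [x] [x x] ] and
  // *renumbering_out records which lists were kept.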
K2_EVAL(
c, num_lists, lambda_set_keep, (int32_t i)->void {
keep_data[i] = (row_splits_data[i + 1] != row_splits_data[i]);
});
return RenumberAxis0Simple(src_shape, *renumbering_out);
}
RaggedShape RenumberAxis0Simple(RaggedShape &src_shape,
Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(renumbering.NumOldElems(), src_shape.Dim0());
ContextPtr c = src_shape.Context();
src_shape.RowIds(1); // make sure RowIds(1) is populated.
std::vector<RaggedShapeLayer> layers = src_shape.Layers();
int32_t num_layers = layers.size();
int32_t new_num_lists = renumbering.NumNewElems(),
num_elems = src_shape.TotSize(1); // unchanged old vs. new.
Array1<int32_t> new_row_splits(c, new_num_lists + 1),
new_row_ids = renumbering.Old2New()[src_shape.RowIds(1)];
int32_t *new_row_splits_data = new_row_splits.Data();
const int32_t *old_row_splits_data = src_shape.RowSplits(1).Data(),
*new2old_data = renumbering.New2Old().Data();
// set `new_row_splits_data`.
#ifndef NDEBUG
{
Array1<int32_t> is_ok(c, 1, 1);
int32_t *is_ok_data = is_ok.Data();
int32_t old_num_lists = src_shape.Dim0();
const int32_t *old2new_data = renumbering.Old2New().Data();
K2_EVAL(
c, old_num_lists, lambda_check_preconditions, (int32_t i)->void {
if (old2new_data[i + 1] == old2new_data[i]) { // This list not kept
if (old_row_splits_data[i + 1] != old_row_splits_data[i]) {
// this list was nonempty...
is_ok_data[0] = 0;
}
}
});
K2_CHECK_NE(is_ok[0], 0) << "RenumberAxis0Simple(): preconditions not met; "
"renumbering removes nonempty lists.";
}
#endif
K2_EVAL(
c, new_num_lists + 1, lambda_set_new_row_splits, (int32_t new_i)->void {
int32_t j;
if (new_i == new_num_lists) {
j = num_elems;
} else {
int32_t old_i = new2old_data[new_i];
j = old_row_splits_data[old_i];
}
new_row_splits_data[new_i] = j;
});
layers[0].row_splits = new_row_splits;
layers[0].row_ids = new_row_ids;
// no need to set its cached_tot_size; that didn't change.
return RaggedShape(layers);
}
RaggedShape CoveringShape(int32_t num_srcs, RaggedShape **srcs) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
if (num_srcs == 1) return *srcs[0];
K2_CHECK_EQ(srcs[0]->NumAxes(), 2);
int32_t dim0 = srcs[0]->Dim0();
ContextPtr &c = srcs[0]->Context();
for (int32_t i = 1; i != num_srcs; ++i) {
K2_CHECK_EQ(srcs[i]->NumAxes(), 2);
K2_CHECK_EQ(srcs[i]->Dim0(), dim0);
K2_CHECK(c->IsCompatible(*srcs[i]->Context()));
}
// get row splits of srcs
Array1<int32_t *> row_splits_ptrs(GetCpuContext(), num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[i] = srcs[i]->RowSplits(1).Data();
}
row_splits_ptrs = row_splits_ptrs.To(c);
int32_t **src_row_splits_ptr_data = row_splits_ptrs.Data();
RaggedShape shape = RegularRaggedShape(c, dim0, num_srcs);
Array1<int32_t> values(c, dim0 * num_srcs);
// elements in row i of `sublist_sizes` are the sizes of row i
// of src[0], src[1]...
Ragged<int32_t> sublist_sizes(shape, values);
int32_t *values_data = sublist_sizes.values.Data();
K2_EVAL2(
c, dim0, num_srcs, lambda_set_sublist_sizes,
(int32_t i, int32_t j)->void {
values_data[i * num_srcs + j] =
src_row_splits_ptr_data[j][i + 1] - src_row_splits_ptr_data[j][i];
});
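  // Worked example (made-up sizes): with num_srcs == 2 and per-row sizes
  // [ 2 0 3 ] and [ 1 4 0 ], `sublist_sizes` is [ [2 1] [0 4] [3 0] ];
  // MaxPerSublist then gives [ 2 4 3 ] and the resulting row_splits are
  // [ 0 2 6 9 ].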
Array1<int32_t> ans_row_splits(c, dim0 + 1);
Array1<int32_t> ans_row_sizes = ans_row_splits.Arange(0, dim0);
MaxPerSublist(sublist_sizes, 0, &ans_row_sizes);
ExclusiveSum(ans_row_sizes, &ans_row_splits);
return RaggedShape2(&ans_row_splits, nullptr, -1);
}
Array1<int32_t> CoveringShapeForwardMap(RaggedShape &src,
RaggedShape &covering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(covering.NumAxes(), 2);
K2_CHECK_EQ(src.Dim0(), covering.Dim0());
int32_t num_elems = covering.NumElements();
K2_CHECK_GE(num_elems, src.NumElements());
ContextPtr c = GetContext(src, covering);
Array1<int32_t> ans(c, num_elems);
int32_t *ans_data = ans.Data();
const int32_t *covering_row_splits_data = covering.RowSplits(1).Data(),
*covering_row_ids_data = covering.RowIds(1).Data(),
*src_row_splits_data = src.RowSplits(1).Data();
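  // Worked example (made-up sizes, matching the one in CoveringShape()): if
  // src has row_splits [ 0 2 2 5 ] and covering has row_splits [ 0 2 6 9 ],
  // the kernel below returns [ 0 1  -1 -1 -1 -1  2 3 4 ]: positions that
  // exist in src map to the corresponding src element, padding maps to -1.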
K2_EVAL(
c, num_elems, lambda_set_value, (int32_t covering_idx01)->void {
int32_t covering_idx0 = covering_row_ids_data[covering_idx01],
covering_idx0x = covering_row_splits_data[covering_idx0],
covering_idx1 = covering_idx01 - covering_idx0x;
        // src and covering have the same dim0
int32_t src_idx0x = src_row_splits_data[covering_idx0],
src_cur_row_size =
src_row_splits_data[covering_idx0 + 1] - src_idx0x;
K2_DCHECK_GE(
covering_row_splits_data[covering_idx0 + 1] - covering_idx0x,
src_cur_row_size);
if (covering_idx1 >= src_cur_row_size)
ans_data[covering_idx01] = -1;
else
ans_data[covering_idx01] = src_idx0x + covering_idx1; // src_idx01
});
return ans;
}
void RaggedShapeAxis0Splitter::Init(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
int32_t num_layers = src.NumLayers(), num_layers_out = num_layers - 1,
dim0 = src.Dim0();
K2_CHECK_LE(num_layers_out, 4); // If this fails, add something to the 4s and
// 5s here and in the header.
K2_CHECK_GT(num_layers, 1);
ContextPtr c = src.Context();
composite_row_splits_ = Array2<int32_t>(c, num_layers + 1, dim0 + 1);
Array2Accessor<int32_t> composite_row_splits_acc =
composite_row_splits_.Accessor();
RowSplitsAccessor<5> src_row_splits_acc(src);
SmallVec<int32_t *, 5> row_splits_out_acc;
K2_CHECK(num_layers_out <= 5);
Array1<int32_t> garbage1(c, dim0 + dim0 + 1); // won't be read.
row_splits_out_acc.data[0] = garbage1.Data();
for (int32_t l = 0; l < num_layers_out; l++) {
row_splits_out_[l] = Array1<int32_t>(c, src.TotSize(l + 1) + dim0 + 1);
row_splits_out_acc.data[l + 1] = row_splits_out_[l].Data();
}
// set composite_row_splits_ and also those elements of
// the output row_splits which are bound to be zero.
K2_EVAL(
c, dim0 + 1, lambda_set_composite_row_splits, (int32_t i)->void {
int32_t cur_pos = i;
composite_row_splits_acc(0, i) = cur_pos;
for (int32_t l = 0; l < num_layers; l++) {
// The following statement sets the zero at the beginning of each
// row_splits, plus a final zero that we write to avoid an
// if-statement.
row_splits_out_acc.data[l][cur_pos + i] = 0;
cur_pos = src_row_splits_acc.ptrs[l][cur_pos];
composite_row_splits_acc(l + 1, i) = cur_pos;
}
});
composite_row_splits_cpu_ = composite_row_splits_.To(GetCpuContext());
  // Right now to_idx0 maps from an idx0 to an idx0 (identity map); next time it
  // will map from an idx01 to an idx0, then idx012 to idx0 (all w.r.t. src).
// It doesn't include the extra last element like a row_splits would; it's
// like a composite row_ids vector: row_ids1, row_ids12 and so on.
Array1<int32_t> to_idx0 = composite_row_splits_.Row(0).Arange(0, dim0);
for (int32_t layer = 0; layer < num_layers_out; layer++)
row_ids_out_[layer] = Array1<int32_t>(c, src.TotSize(layer + 2));
Array1<int32_t> garbage2(c,
src.TotSize(1)); // corresponds to row_ids_out_[-1].
for (int32_t layer = 0; layer <= num_layers_out; layer++) {
// num_elems is the number of elements we process in this kernel.
int32_t num_elems = src.TotSize(layer + 1);
// The names here are valid for layer == 1; this just happens to be useful
// for exposition.
const int32_t *src_row_ids2_data = src.RowIds(layer + 1).Data(),
*idx01_to_idx0_data = to_idx0.Data();
int32_t *row_ids1_out_data =
(layer == 0 ? garbage2.Data() : row_ids_out_[layer - 1].Data());
if (layer < num_layers_out) {
Array1<int32_t> to_idx0_next(c, num_elems);
int32_t *row_splits2_out_data = row_splits_out_[layer].Data(),
*idx012_to_idx0_data = to_idx0_next.Data();
const int32_t *src_row_splits3_data = src.RowSplits(layer + 2).Data();
// row_splits3 maps from idx012 -> idx012x.
// remember: the names are valid for layer == 1, just as an example.
K2_EVAL(
c, num_elems, lambda_set_row_splits_and_ids,
(int32_t src_idx012)->void {
int32_t src_idx01 = src_row_ids2_data[src_idx012],
src_idx012x_next = src_row_splits3_data[src_idx012 + 1],
src_idx0 = idx01_to_idx0_data[src_idx01];
idx012_to_idx0_data[src_idx012] = src_idx0; // <-- output here.
int32_t src_idx0x = composite_row_splits_acc(layer, src_idx0),
src_idx0xxx = composite_row_splits_acc(layer + 2, src_idx0),
src_idx1 = src_idx01 - src_idx0x,
src_idx12x_next = src_idx012x_next - src_idx0xxx,
out_idx0 = src_idx1, out_idx01x_next = src_idx12x_next;
row_ids1_out_data[src_idx012] = out_idx0;
// below, the "+1" is because each element handles the next one
// within this output row_splits array, with the zeros (1st elem of
// each output row_splits array) handled by
                // lambda_set_composite_row_splits.  The "+ src_idx0" is to
                // make room for the extra final element of all the previous
                // row_splits arrays.
row_splits2_out_data[src_idx012 + 1 + src_idx0] = out_idx01x_next;
});
to_idx0 = to_idx0_next;
} else {
      // The following code is a subset of the other branch.
K2_EVAL(
c, num_elems, lambda_set_row_ids, (int32_t src_idx012)->void {
int32_t src_idx01 = src_row_ids2_data[src_idx012],
idx0 = idx01_to_idx0_data[src_idx01],
src_idx0x = composite_row_splits_acc(layer, idx0),
src_idx1 = src_idx01 - src_idx0x, out_idx0 = src_idx1;
row_ids1_out_data[src_idx012] = out_idx0;
});
}
}
}
RaggedShape RaggedShapeAxis0Splitter::GetElement(int32_t i,
int32_t *elem_offset) {
NVTX_RANGE(K2_FUNC);
int32_t num_layers_out = composite_row_splits_.Dim0() - 2;
std::vector<RaggedShapeLayer> out;
out.reserve(num_layers_out);
auto composite_row_splits_cpu_acc = composite_row_splits_cpu_.Accessor();
for (int32_t layer = 0; layer < num_layers_out; layer++) {
int32_t row_begin = composite_row_splits_cpu_acc(layer + 1, i),
row_end = composite_row_splits_cpu_acc(layer + 1, i + 1),
elem_begin = composite_row_splits_cpu_acc(layer + 2, i),
elem_end = composite_row_splits_cpu_acc(layer + 2, i + 1),
num_elems = elem_end - elem_begin;
if (layer + 1 == num_layers_out && elem_offset != nullptr)
*elem_offset = elem_begin;
// the "+ i" is to account for the extra final elements of preceding
// row_splits vectors; the + 1 is for the final element of this one.
Array1<int32_t> splits = row_splits_out_[layer].Arange(row_begin + i,
row_end + i + 1),
ids = row_ids_out_[layer].Arange(elem_begin, elem_end);
out.emplace_back(RaggedShapeLayer{splits, ids, num_elems});
}
// TODO: when thoroughly debugged, maybe turn off validation?
return RaggedShape(out);
}
namespace hash_internal {
// Utilities for hashing strings (actually: sequences of int32_t).
/*
T can be int32_t or int64_t.
The following code shows what we are computing:
std::vector<int32_t> input;
T hash1 = 13, hash2 = 787;
for (size_t i = 0; i < input.size(); i++) {
hash1 = 31 * hash1 + input[i];
hash2 = 167 * hash2 + input[i];
}
hash = hash1 + 104729 * hash2;
   I'm not sure these constants are optimal, but they are primes.
The actual calculation is a little different from the above because
of the need to do it via a reduction.
*/
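/*
  A sketch of why the segmented reduction in ComputeHash() below computes the
  same value as the loop above (this follows from the definitions of
  HashInputIterator, HashCombineOp and HashOutputIteratorDeref): each element x
  becomes Hash{x, x, 31, 167}, and HashCombineOp combines partial results as
    (a.hash1 * b.product1 + b.hash1, a.hash2 * b.product2 + b.hash2,
     a.product1 * b.product1, a.product2 * b.product2).
  Starting from the initial Hash{0, 0, 1, 1}, a segment [ x0, x1 ] reduces to
  hash1 = 31 * x0 + x1, hash2 = 167 * x0 + x1, product1 = 31 * 31,
  product2 = 167 * 167; HashOutputIteratorDeref then writes
    hash1 + 13 * product1 + 104729 * hash2 + (104729 * 787) * product2,
  which is exactly what the sequential loop produces with its initial values
  hash1 = 13 and hash2 = 787.
*/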
template <typename T>
struct Hash {
T hash1;
T hash2;
T product1;
T product2;
  // We'd like this to be a POD type, so we are not adding the following
  // constructor:
  //   Hash(int32_t i): hash1(i), hash2(i), product1(31), product2(167) { }
  // .. instead the equivalent initialization is done in HashInputIterator.
};
template <typename T>
struct HashInputIterator {
explicit __host__ __device__ __forceinline__ HashInputIterator(const int32_t *i) // NOLINT
: i_(i) {}
__device__ __forceinline__ Hash<T> operator[](int32_t idx) const {
return Hash<T>{i_[idx], i_[idx], 31, 167};
}
__device__ __forceinline__ HashInputIterator operator+(int32_t offset) const {
return HashInputIterator(i_ + offset);
}
const int32_t *i_;
};
template <typename T>
struct HashOutputIteratorDeref {  // this is what you get when you dereference
                                  // HashOutputIterator; it pretends to be a
                                  // Hash<T> but really just wraps the output
                                  // pointer `t_`: assigning a Hash<T> to it
                                  // writes the finalized scalar hash.
explicit __device__ __forceinline__ HashOutputIteratorDeref(T *t)
: t_(t) {}
__device__ __forceinline__ HashOutputIteratorDeref &operator=(
const Hash<T> &h) {
*t_ = h.hash1 + 13 * h.product1 + 104729 * h.hash2 +
(104729 * 787) * h.product2;
return *this;
}
T *t_;
};
template <typename T>
struct HashOutputIterator {  // writes the finalized scalar hash for each segment.
explicit HashOutputIterator(T *t) : t_(t) {}
__device__ __forceinline__ HashOutputIteratorDeref<T> operator[](
int32_t idx) const {
return HashOutputIteratorDeref<T>(t_ + idx);
}
__device__ __forceinline__ HashOutputIterator operator+(size_t offset) {
return HashOutputIterator{t_ + offset};
}
T *t_;
};
template <typename T>
struct HashCombineOp {
__device__ __forceinline__ Hash<T> operator()(const Hash<T> &a,
const Hash<T> &b) const {
return Hash<T>{a.hash1 * b.product1 + b.hash1,
a.hash2 * b.product2 + b.hash2,
a.product1 * b.product1,
a.product2 * b.product2};
}
};
} // namespace hash_internal
} // namespace k2
namespace std {
// the typedefs below are required by hipcub::DeviceSegmentedReduce::Reduce
template <typename T>
struct iterator_traits<k2::hash_internal::HashInputIterator<T>> {
typedef k2::hash_internal::Hash<T> value_type;
};
template <typename T>
struct iterator_traits<k2::hash_internal::HashOutputIterator<T>> {
typedef k2::hash_internal::Hash<T> value_type;
typedef k2::hash_internal::HashOutputIteratorDeref<T> reference;
};
} // namespace std
namespace k2 {
template <typename T>
Array1<T> ComputeHash(Ragged<int32_t> &src) {
NVTX_RANGE(K2_FUNC);
int32_t last_axis = src.NumAxes() - 1;
const Array1<int32_t> &row_splits_array = src.RowSplits(last_axis);
int32_t num_rows = row_splits_array.Dim() - 1;
ContextPtr &c = src.Context();
Array1<T> ans(c, num_rows);
const int32_t *row_splits = row_splits_array.Data();
const int32_t *values_data = src.values.Data();
T *output_data = ans.Data();
if (c->GetDeviceType() == kCpu) {
int32_t j = row_splits[0];
for (int32_t i = 0; i < num_rows; ++i) {
T hash1 = 13, hash2 = 787;
int32_t row_end = row_splits[i + 1];
for (; j < row_end; ++j) {
T elem = values_data[j];
hash1 = 31 * hash1 + elem;
hash2 = 167 * hash2 + elem;
}
T hash = hash1 + 104729 * hash2;
output_data[i] = hash;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
hash_internal::HashInputIterator<T> input_iter(values_data);
hash_internal::HashOutputIterator<T> output_iter(output_data);
hash_internal::HashCombineOp<T> op;
hash_internal::Hash<T> initial_hash{ 0, 0, 1, 1 };
// This code is based on the example here:
// https://nvlabs.github.io/cub/structcub_1_1_device_segmented_reduce.html
std::size_t temp_storage_bytes = 0;
    // the first call only determines the temporary device storage requirements
K2_CUDA_SAFE_CALL(hipcub::DeviceSegmentedReduce::Reduce(
nullptr, temp_storage_bytes, input_iter, output_iter, num_rows,
row_splits, row_splits + 1, op, initial_hash, c->GetCudaStream()));
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CUDA_SAFE_CALL(hipcub::DeviceSegmentedReduce::Reduce(
d_temp_storage.Data(), temp_storage_bytes, input_iter, output_iter,
num_rows, row_splits, row_splits + 1, op, initial_hash,
c->GetCudaStream()));
}
return ans;
}
Ragged<int32_t> UniqueSequences(Ragged<int32_t> &src,
Ragged<int32_t> *num_repeats /*=nullptr*/,
Array1<int32_t> *new2old_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
if (src.NumAxes() == 2) {
// Put 'fake' layer at front, process, then remove.
Ragged<int32_t> temp = Unsqueeze(src, 0);
return UniqueSequences(temp, num_repeats, new2old_indexes).RemoveAxis(0);
}
Array1<int64_t> hashes = ComputeHash<int64_t>(src);
int32_t hashes_dim = hashes.Dim();
Array1<int32_t> order(c, hashes_dim);
// Using the layer before the last layer of `src` for the shape of
// `ragged_hashes`
Ragged<int64_t> ragged_hashes(GetLayer(src.shape, src.shape.NumLayers() - 2),
hashes);
SortSublists<int64_t, LessThan<int64_t> >(&ragged_hashes, &order);
Renumbering renumber_lists(c, hashes.Dim());
const int32_t *ragged_hashes_row_ids_data = ragged_hashes.RowIds(1).Data(),
*ragged_hashes_row_splits_data = ragged_hashes.RowSplits(1).Data();
const int64_t *ragged_hashes_data = ragged_hashes.values.Data();
char *keep_list_data = renumber_lists.Keep().Data();
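  // Sketch with made-up hashes: if the sorted ragged_hashes were
  // [ [h1 h1 h2] [h3 h3] ], the kernel below sets keep == [ 1 0 1 1 0 ], i.e.
  // only the first occurrence of each distinct hash within a sub-list is kept.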
K2_EVAL(
c, hashes_dim, lambda_set_keep, (int32_t i)->void {
char keep;
if (i == ragged_hashes_row_splits_data[ragged_hashes_row_ids_data[i]]) {
// this is the first element of its sub-list in `ragged_hashes`.
keep = 1;
} else {
keep = (ragged_hashes_data[i] != ragged_hashes_data[i - 1]);
}
keep_list_data[i] = keep;
});
Array1<int32_t> new2old = renumber_lists.New2Old(),
new2unsorted = order[new2old];
Ragged<int32_t> ans = Index(src, src.NumAxes() - 2, new2unsorted);
if (num_repeats != nullptr) {
int32_t new2old_dim = new2old.Dim();
Array1<int32_t> num_repeats_array(c, new2old_dim);
const int32_t *new2old_data = new2old.Data();
int32_t *num_repeats_data = num_repeats_array.Data();
K2_EVAL(
c, new2old_dim, set_num_repeats, (int32_t i)->void {
if (i < new2old_dim - 1) {
num_repeats_data[i] = new2old_data[i + 1] - new2old_data[i];
} else {
num_repeats_data[i] = hashes_dim - new2old_data[i];
}
});
*num_repeats = Ragged<int32_t>(GetLayer(ans.shape, ans.NumAxes() - 3),
num_repeats_array);
}
if (new2old_indexes != nullptr) {
*new2old_indexes = std::move(new2unsorted);
}
return ans;
}
// Instantiate template for int64 and int32.
template
Array1<int64_t> ComputeHash(Ragged<int32_t> &src);
template
Array1<int32_t> ComputeHash(Ragged<int32_t> &src);
} // namespace k2
| 82fa813c0ef380bcf1964cfd21476b03a20e1b26.cu | /**
* Copyright 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
* Yiming Wang
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/cub.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "k2/csrc/ragged_utils.h"
namespace {
/*
A helper function used in RaggedShape3;
if both first and second are non-NULL, it will check if the context of them
is compatible or not and return that context if compatible;
if one of them is NULL, returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeLayer> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
// The reason for "|| RandInt(0, 2) == 0)" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
// just do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back())
<< "Bad row splits is: " << *row_splits;
}
}
std::vector<RaggedShapeLayer> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
// we need to work out row_splits as we always require row_splits is not
// empty for RaggedShape. Note here we suppose the last element in row_ids
// is num_rows - 1, i.e. there're no empty rows after row `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
  // note: the line below will check that row_splits and row_ids are valid and
  // agree with each other.
return RaggedShape(axes);
}
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
K2_CHECK(IsCompatible(a, b));
const auto &a_axes = a.Layers();
const auto &b_axes = b.Layers();
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
std::vector<RaggedShapeLayer> axes;
axes.reserve(a_size + b_size);
for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]);
for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]);
bool validate = false;
return RaggedShape(axes, validate);
}
RaggedShape ComposeRaggedShapes3(const RaggedShape &a, const RaggedShape &b,
const RaggedShape &c) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
if (b.NumElements() != c.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << b.NumElements()
<< " vs. " << c.Dim0();
}
K2_CHECK(IsCompatible(a, b));
K2_CHECK(IsCompatible(b, c));
const auto &a_axes = a.Layers();
const auto &b_axes = b.Layers();
const auto &c_axes = c.Layers();
std::size_t a_size = a_axes.size(), b_size = b_axes.size(),
c_size = c_axes.size();
std::vector<RaggedShapeLayer> axes;
axes.reserve(a_size + b_size + c_size);
for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]);
for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]);
for (std::size_t i = 0; i < c_size; ++i) axes.emplace_back(c_axes[i]);
bool validate = false;
return RaggedShape(axes, validate);
}
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1);
Array1<int32_t> temp_array;
if (row_splits2 == nullptr) {
K2_CHECK_NE(row_ids2, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1);
row_splits2 = &temp_array;
RowIdsToRowSplits(*row_ids2, row_splits2);
}
return ComposeRaggedShapes(
shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2));
}
RaggedShape RaggedShape4(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2,
Array1<int32_t> *row_splits3,
Array1<int32_t> *row_ids3, int32_t cached_tot_size3) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1,
row_splits2, row_ids2, cached_tot_size2);
Array1<int32_t> temp_array;
if (row_splits3 == nullptr) {
K2_CHECK_NE(row_ids3, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array =
Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1);
row_splits3 = &temp_array;
RowIdsToRowSplits(*row_ids3, row_splits3);
}
return ComposeRaggedShapes(
shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3));
}
RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes,
const int32_t *tot_sizes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
int32_t tot_size = 0;
for (int32_t axis = 1; axis < num_axes; ++axis) {
tot_size += tot_sizes[axis - 1] + 1 + tot_sizes[axis];
}
Array1<int32_t> buf(c, tot_size);
int32_t start = 0;
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = buf.Arange(start,
start + tot_sizes[axis - 1] + 1);
start += tot_sizes[axis - 1] + 1;
axes[axis - 1].row_ids = buf.Arange(start, start + tot_sizes[axis]);
start += tot_sizes[axis];
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
  // Don't check here, as we did not set the values of row_splits and row_ids yet.
return RaggedShape(axes, false);
}
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
  // The reason why the code is different for axis == 0 is that in that case we
// are really making visible an "implicit" axis of the input `src`; we could
// call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from
// an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 <
// Dim0().
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeLayer is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeLayer> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem, (int32_t i)->void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
});
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem2,
(int32_t i)->void { mem_data[i] = i % (tot_size + 1); });
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
  // Note: the returned shape has `num_axes_in + 1` axes, so its
  // array of RaggedShapeLayer is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0) return ans;
ans.reserve(num_srcs);
ContextPtr &c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; ++i) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim) max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; ++i) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeLayer> axes;
axes.reserve(num_axes); // note, the size of the `layers` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Layers().begin(), src[i]->Layers().end());
ans.emplace_back(std::move(axes));
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] new2old Array of indexes into axis 0 of src; elements
equal to -1 will be interpreted as referring to
an empty list.
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
*/
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
// max 5 layers.
RowSplitsAccessor<5> row_splits_acc(src);
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
K2_EVAL(
c, ans_dim0, lambda_set_offsets, (int32_t i)->void {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i],
old_offset_next = old_offset + 1,
offset_diff = 1;
// The following is a special case that interprets -1 as referring to an
// empty list. In this case, old_offset == old_offset_next == 0.
// The specific value 0 is not necessary; they could be equal
// and have any value in [0, src.Dim0() - 1] and still refer to
// the empty list.
if (old_offset == -1)
old_offset = 0;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = offset_diff;
if (axis + 1 == num_axes) return;
old_offset = row_splits_acc(axis)[old_offset];
old_offset_next = row_splits_acc(axis)[old_offset_next];
offset_diff = old_offset_next - old_offset;
}
});
ExclusiveSum(*new_offsets, new_offsets);
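  // Worked example (made-up shape): for a 2-axis src with row_splits
  // [ 0 2 3 6 ] and new2old == [ 2 0 -1 ], the kernel above gives
  // old_offsets == [ [ 2 0 0 ] [ 3 0 0 ] ] and sizes [ [ 1 1 1 ] [ 3 2 0 ] ],
  // which the (per-row) exclusive sum turns into
  // new_offsets == [ [ 0 1 2 3 ] [ 0 3 5 5 ] ].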
}
// Don't make it static to fix the following error on Windows.
// Error : On Windows, the enclosing parent function ("IndexAxis0") for an
// extended __host__ __device__ lambda cannot have internal or no linkage
/*static*/ RaggedShape IndexAxis0(RaggedShape &src,
const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, new2old, &old_offsets, &new_offsets);
// tot_sizes_out is of dimension (num_axes), tot_sizes_out[i] is
// ans.TotSize(i)
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
int32_t *tot_sizes_out_cpu_data = tot_sizes_out.Data();
if (elem_indexes)
*elem_indexes = Array1<int32_t>(c, tot_sizes_out_cpu_data[num_axes - 1]);
RaggedShape ans =
RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out_cpu_data);
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
for (int32_t axis = 1; axis < num_axes; axis++) {
// we are not creating the actual row_ids here, except for axis 1; we are
// creating "composed row_ids" which map to the index on axis 0.
Array1<int32_t> row_ids = ans.RowIds(axis);
RowSplitsToRowIds(new_offsets.Row(axis), &row_ids);
}
ans.Layers()[0].row_splits = new_offsets.Row(1);
// Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1).
RowSplitsAccessor<5> old_row_splits_acc(src),
new_row_splits_acc(ans);
RowIdsAccessor<5> old_row_ids_acc(src),
new_row_ids_acc(ans);
SmallVec<int32_t, 6> tot_sizes;
K2_CHECK(num_axes <= 6);
int32_t max_tot_size = 0;
for (int32_t i = 0; i < num_axes; i++) {
tot_sizes.data[i] = tot_sizes_out_cpu_data[i];
max_tot_size = std::max<int32_t>(max_tot_size,
tot_sizes.data[i]);
}
int32_t *elem_indexes_data = (elem_indexes != nullptr ?
elem_indexes->Data() : nullptr);
// Note, the first row_splits vector was set above, ans.Layers()[0].row_splits
// = new_offsets.Row(1).
auto lambda_set_row_splits_and_ids = [=] __host__ __device__(
int32_t axis, int32_t i) -> void {
axis++; // make it one-based.
int32_t tot_size = tot_sizes(axis); // == new_offsets_acc(axis, ans_dim0);
if (i > tot_size)
return;
int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1);
int32_t ans_idx0 = (i == tot_size ? ans_dim0 :
composed_row_ids_data[i]),
job_begin = new_offsets_acc(axis, ans_idx0),
job_this_idx0 = i - job_begin;
K2_CHECK_GE(job_this_idx0, 0);
int32_t row_split_value = 0, new_next_offset = 0;
if (axis + 1 < num_axes)
new_next_offset = new_offsets_acc(axis + 1, ans_idx0);
if (i < tot_size) {
// "prev" means for axis - 1
int32_t new_prev_offset = new_offsets_acc(axis - 1, ans_idx0),
old_prev_offset = old_offsets_acc(axis - 1, ans_idx0),
old_offset = old_offsets_acc(axis, ans_idx0),
old_idx = old_offset + job_this_idx0;
if (axis != 1) {
// Write row-ids.
// Actually doing this for axis == 1 is harmless, but unnecessary, as it
// would write back the same values that were already there. We avoid
// the memory access.
// this_new_row_ids = new_row_ids_acc(axis - 1);
int32_t *this_new_row_ids = composed_row_ids_data;
const int32_t *this_old_row_ids = old_row_ids_acc(axis - 1);
int32_t old_row_id = this_old_row_ids[old_idx],
new_row_id = old_row_id + new_prev_offset - old_prev_offset;
this_new_row_ids[i] = new_row_id;
}
if (elem_indexes_data != nullptr && axis == num_axes - 1)
elem_indexes_data[i] = old_idx;
if (axis + 1 < num_axes) {
int32_t old_next_offset = old_offsets_acc(axis + 1, ans_idx0),
next_offset_diff = new_next_offset - old_next_offset;
const int32_t *old_row_splits_data = old_row_splits_acc(axis);
row_split_value = next_offset_diff + old_row_splits_data[old_idx];
}
} else {
row_split_value = new_next_offset;
}
if (axis + 1 < num_axes) {
int32_t *new_row_splits_data = new_row_splits_acc(axis);
new_row_splits_data[i] = row_split_value;
}
};
constexpr int32_t cutoff = 50000;
if (c->GetDeviceType() == kCpu) {
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
int32_t this_size = tot_sizes(axis + 1);
for (int32_t i = 0; i <= this_size; i++)
lambda_set_row_splits_and_ids(axis, i);
}
} else if (max_tot_size * (num_axes - 1) < cutoff) {
Eval2Device(c, num_axes - 1, max_tot_size + 1,
lambda_set_row_splits_and_ids);
} else {
// Loop in the kernel rather than submitting an excessive number of threads.
auto lambda_loop = [=] __device__(int32_t i) {
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
lambda_set_row_splits_and_ids(axis, i);
}
};
EvalDevice(c, max_tot_size + 1, lambda_loop);
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
RaggedShape Index(RaggedShape &src, int32_t axis,
const Array1<int32_t> &indexes,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(num_axes));
if (axis == 0) {
return IndexAxis0(src, indexes, elem_indexes);
} else if (axis == src.NumAxes() - 1) {
// This code is related to SubsetRaggedShape(). `indexes` corresponds
// to `new2old`.
Array1<int32_t> last_row_ids = src.RowIds(num_axes - 1)[indexes];
#ifndef NDEBUG
if (!IsMonotonic(last_row_ids)) {
K2_LOG(FATAL) << "Invalid indexes used when indexing RaggedShape";
}
#endif
Array1<int32_t> last_row_splits(last_row_ids.Context(),
src.TotSize(num_axes - 2) + 1);
RowIdsToRowSplits(last_row_ids, &last_row_splits);
if (elem_indexes)
*elem_indexes = indexes;
std::vector<RaggedShapeLayer> axes = src.Layers();
axes.back().row_splits = last_row_splits;
axes.back().row_ids = last_row_ids;
axes.back().cached_tot_size = last_row_ids.Dim();
// TODO: disable checking by changing true to false.
return RaggedShape(axes, true);
} else {
RaggedShape top, bottom;
DecomposeRaggedShape(src, axis, &top, &bottom);
RaggedShape top_indexed = Index(top, axis, indexes, nullptr),
bottom_indexed = IndexAxis0(bottom, indexes, elem_indexes);
return ComposeRaggedShapes(top_indexed, bottom_indexed);
}
}
// returns array of dim (src[0]->NumAxes() + 1) by (num_srcs + 1),
// see documentation in header.
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr &ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
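  // Illustrative example (made-up sizes): with num_srcs == 2 and 2-axis srcs
  // where Dim0() is 2 and 3 and TotSize(1) is 5 and 4, the loops below fill:
  //   row 0 (axis 0): [ 0 1 2 ]
  //   row 1 (axis 1): [ 0 2 5 ]
  //   row 2 (axis 2): [ 0 5 9 ]
  // i.e. column j holds the cumulative totals over src[0..j-1] for each axis.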
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
/*static*/ RaggedShape StackAxis0(int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (num_srcs == 1) {
if (merge_map)
*merge_map =
Arange<uint32_t>(src[0]->Context(), 0, src[0]->NumElements());
RaggedShape top_layer = TrivialShape(src[0]->Context(), src[0]->Dim0());
return ComposeRaggedShapes(top_layer, **src);
}
// We can't handle num_srcs == 0 because we won't have a context object.
K2_CHECK_GT(num_srcs, 1);
int32_t num_axes_in = src[0]->NumAxes(),
num_axes_out = num_axes_in + 1;
ContextPtr c = src[0]->Context();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes_in, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
  // Its shape is (num_axes_in + 1 == num_axes_out, num_srcs + 1).
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
SmallVec<int32_t, 6> tot_sizes_out;
K2_CHECK(num_axes_out <= 6);
int32_t max_tot_size = 0;
for (int32_t axis = 0; axis < num_axes_out; axis++) {
tot_sizes_out.data[axis] = offsets_acc(axis, num_srcs);
max_tot_size = std::max<int32_t>(max_tot_size,
tot_sizes_out.data[axis]);
}
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes_out,
tot_sizes_out.data);
// src_row_splits and src_row_ids are of dim num_axes_in-1 by num_srcs.
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor();
for (int32_t axis = 1; axis < num_axes_out; axis++) {
// we are not creating the actual row_ids here, except for axis 1; we are
// creating "composed row_ids" which map to the index on axis 0.
Array1<int32_t> row_ids = ans.RowIds(axis);
RowSplitsToRowIds(offsets.Row(axis), &row_ids);
}
ans.Layers()[0].row_splits = offsets.Row(1);
// Caution: e.g. new_row_splits_acc(i) == ans.RowSplits(i+1).
RowSplitsAccessor<5> new_row_splits_acc(ans);
RowIdsAccessor<5> new_row_ids_acc(ans);
uint32_t *merge_map_data;
if (merge_map != nullptr) {
*merge_map = Array1<uint32_t>(c, tot_sizes_out.data[num_axes_out - 1]);
merge_map_data = merge_map->Data();
} else {
merge_map_data = nullptr;
}
// Note, the first row_splits vector was set above, ans.Layers()[0].row_splits
// = offsets.Row(1).
auto lambda_set_row_splits_and_ids = [=] __host__ __device__(
int32_t axis, int32_t i) -> void {
++axis; // We want this to be called starting with axis == 1, but Eval2
// doesn't support that.
// At this point, 1 <= axis < num_axes_out.
// This kernel will be writing one or both of:
// the row-splits for output-layer==`axis`/input-layer==`axis-1`,
// the row-ids for output-layer=`axis-1`/input-layer==`axis-2`.
int32_t tot_size = tot_sizes_out(axis); // == offsets_acc(axis, num_srcs);
if (i > tot_size)
return;
int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1);
int32_t ans_idx0 =
(i == tot_size
? num_srcs
: composed_row_ids_data[i]), // note: ans_idx0 == src_idx.
job_begin = offsets_acc(axis, ans_idx0), job_this_idx0 = i - job_begin;
K2_CHECK_GE(job_this_idx0, 0);
int32_t row_split_value = 0, new_next_offset = 0;
uint32_t *merge_map_data_local = nullptr;
if (axis + 1 < num_axes_out) {
new_next_offset = offsets_acc(axis + 1, ans_idx0);
} else {
merge_map_data_local = merge_map_data;
}
if (i < tot_size) {
// "prev" means for axis - 1
int32_t new_prev_offset = offsets_acc(axis - 1, ans_idx0);
if (axis != 1) {
// Write row-ids.
// this_new_row_ids = new_row_ids_acc(axis - 1);
int32_t *this_new_row_ids = composed_row_ids_data;
const int32_t *this_src_row_ids = src_row_ids_acc(axis - 2, ans_idx0);
int32_t old_row_id = this_src_row_ids[job_this_idx0],
new_row_id = old_row_id + new_prev_offset;
this_new_row_ids[i] = new_row_id;
}
if (merge_map_data_local != nullptr) {
merge_map_data_local[i] = ans_idx0 + num_srcs * job_this_idx0;
}
if (axis + 1 < num_axes_out) {
const int32_t *src_row_splits_data = src_row_splits_acc(axis - 1,
ans_idx0);
int32_t old_row_split = src_row_splits_data[job_this_idx0];
row_split_value = new_next_offset + old_row_split;
}
} else {
row_split_value = new_next_offset;
}
if (axis + 1 < num_axes_out) {
int32_t *new_row_splits_data = new_row_splits_acc(axis);
new_row_splits_data[i] = row_split_value;
}
};
constexpr int32_t cutoff = 50000;
if (c->GetDeviceType() == kCpu) {
for (int32_t axis = 0; axis < num_axes_out - 1; axis++) {
int32_t this_size = tot_sizes_out(axis + 1);
for (int32_t i = 0; i <= this_size; i++)
lambda_set_row_splits_and_ids(axis, i);
}
} else if (max_tot_size * (num_axes_out - 1) < cutoff) {
Eval2Device(c, num_axes_out - 1, max_tot_size + 1,
lambda_set_row_splits_and_ids);
} else {
// Loop in the kernel rather than submitting an excessive number of threads.
auto lambda_loop = [=] __device__(int32_t i) {
for (int32_t axis = 0; axis < num_axes_out - 1; axis++) {
lambda_set_row_splits_and_ids(axis, i);
}
};
EvalDevice(c, max_tot_size + 1, lambda_loop);
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
RaggedShape Cat(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
if (axis == 0) {
RaggedShape temp = StackAxis0(num_srcs, src, merge_map);
std::vector<RaggedShapeLayer> ans_layers(
temp.Layers().begin() + 1, temp.Layers().end());
return RaggedShape(ans_layers, false);
}
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes - 1);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m =
(axis + 1 == num_axes ? merge_map : &merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m),
t = SubsampleRaggedLayer(s, 0, num_srcs);
ans_layers[axis - 1] = t.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m =
(l + 2 == num_axes ? merge_map : &merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
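// Illustrative usage sketch of Cat() on axis 0 (added for exposition, not part
// of the original code). It assumes the string-form RaggedShape constructor
// used in k2's tests; the shapes shown are just examples.
//
//   RaggedShape a("[ [ x x ] [ x ] ]"), b("[ [ x x x ] ]");
//   RaggedShape *srcs[2] = {&a, &b};
//   RaggedShape c = Cat(0 /*axis*/, 2, srcs);
//   // c is [ [ x x ] [ x ] [ x x x ] ]: the sources are appended along axis 0.
//   // For axis >= 1, the sources must have identical structure on the axes
//   // preceding `axis`; the corresponding sublists are then concatenated.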
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
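// Illustrative sketch of RemoveAxis (added for exposition; assumes the
// string-form constructor used in k2's tests):
//
//   RaggedShape s("[ [ [ x ] [ x x ] ] [ [ x x x ] ] ]");  // 3 axes
//   RaggedShape r0 = RemoveAxis(s, 0);  // [ [ x ] [ x x ] [ x x x ] ]
//   RaggedShape r1 = RemoveAxis(s, 1);  // [ [ x x x ] [ x x x ] ]
//   // Removing axis 1 merges each top-level row's sublists into one row.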
RaggedShape MakeTransposable(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeLayer &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
K2_EVAL(
c, ans_tot_size1, lambda_set_row_ids1,
(int32_t i)->void { row_ids1_data[i] = i / max_size; });
}
if (num_axes > 2) {
RaggedShapeLayer &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
K2_EVAL(
c, ans_tot_size1 + 1, lambda_set_row_splits2,
(int32_t idx01)->void {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] =
src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
});
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
K2_EVAL(
c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
});
}
}
}
// copy the remaining row_splits and row_ids;
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) {
if (value_indexes) *value_indexes = Array1<int32_t>(c, 0);
return src;
}
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
// `renumbering` is a `new2old` map that maps from the first index into
// src_no_axis0_renumbered to the first index into src_no_axis0.
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
K2_EVAL(
c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
});
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, 0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeLayer> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
});
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
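// Illustrative sketch of Transpose (added for exposition; assumes the
// string-form constructor used in k2's tests). The input must have the same
// number of sublists in every row of axis 0 (see MakeTransposable above):
//
//   RaggedShape s("[ [ [ x ] [ x x ] ] [ [ x x x ] [ ] ] ]");
//   RaggedShape t = Transpose(s, /*value_indexes*/ nullptr);
//   // t is [ [ [ x ] [ x x x ] ] [ [ x x ] [ ] ] ], i.e. t[i][j] == s[j][i].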
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* = nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
ContextPtr c = src[0]->Context();
if (axis == 0) {
return StackAxis0(num_srcs, src, merge_map);
}
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m =
(axis + 1 == num_axes ? merge_map : &merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m);
// note: s.Dim0() will be a multiple of num_srcs.
ans_layers[axis - 1] =
RegularRaggedShape(c, s.Dim0() / num_srcs, num_srcs).Layers()[0];
ans_layers[axis] = s.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m =
(l + 2 == num_axes ? merge_map : &merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l + 1] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
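// Illustrative sketch of Stack on axis 0 (added for exposition; assumes the
// string-form constructor used in k2's tests):
//
//   RaggedShape a("[ [ x x ] [ x ] ]"), b("[ [ x x x ] ]");
//   RaggedShape *srcs[2] = {&a, &b};
//   RaggedShape s = Stack(0 /*axis*/, 2, srcs);
//   // s is [ [ [ x x ] [ x ] ] [ [ x x x ] ] ]: a new leading axis of size
//   // num_srcs is added, with s[i] equal to *srcs[i].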
/*
Select a ragged tensor's shape on axis 0 with a two-axis ragged index.
  @param [in] src       Source RaggedShape to select from.
  @param [in] indexes   A **TWO**-axis ragged tensor containing the indexes
                        into axis 0 of src. We also support -1 as an index,
                        which will result in an empty list (as if it were the
                        index into a position in `src` that had an empty list),
                        i.e. we require `-1 <= indexes[i] < src.TotSize(0)`.
  @param [out] out      The container to which the output RaggedShapes will be
                        written; MUST NOT be a nullptr. It will be reallocated,
                        and its final size will equal `indexes.Dim0()`.
                        Note: the `NumAxes()` of each output RaggedShape is the
                        same as the `NumAxes()` of src.
  @param [out] split_map  If not nullptr, will store the element-indexes within
                          src telling where the elements of each split
                          RaggedShape come from. It will be reallocated, and
                          its final size will equal `indexes.Dim0()`.

  Suppose indexes is `[ [ 0 3 5 ] [ 1 2 4 ] [ 6 -1 ] ]`; it means that we will
  select elements 0,3,5 of src's axis 0 to construct the first output
  RaggedShape, 1,2,4 to construct the second output RaggedShape, and 6 plus an
  empty list to construct the third output RaggedShape.
*/
/*static*/ void SelectAxis0(RaggedShape &src, const Ragged<int32_t> &indexes,
std::vector<RaggedShape> *out, std::vector<Array1<int32_t>> *split_map) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
K2_CHECK(IsCompatible(src, indexes));
K2_CHECK_EQ(indexes.NumAxes(), 2);
K2_CHECK(out != nullptr);
int32_t num_axes = src.NumAxes(),
out_size = indexes.Dim0(),
tot_elems = indexes.NumElements();
if (out_size == 0) {
*out = std::vector<RaggedShape>();
if (split_map) {
*split_map = std::vector<Array1<int32_t>>();
}
return;
}
Array2<int32_t> old_offsets, // num_axes by tot_elems
new_offsets; // num_axes by (tot_elems + 1).
GetOldAndNewOffsets(src, indexes.values, &old_offsets, &new_offsets);
const int32_t *indexes_row_split1_data = indexes.RowSplits(1).Data(),
*indexes_row_ids1_data = indexes.RowIds(1).Data();
// Contains the `TotSize` of each axis of each output RaggedShape
Array2<int32_t> tot_sizes(c, out_size, num_axes);
Array2Accessor<int32_t> tot_sizes_acc = tot_sizes.Accessor();
Array2Accessor<int32_t> new_offsets_acc = new_offsets.Accessor();
K2_EVAL2(c, out_size, num_axes, lambda_set_tot_sizes,
(int32_t i, int32_t j) -> void {
int32_t idx0 = indexes_row_split1_data[i],
idx0_next = indexes_row_split1_data[i + 1];
tot_sizes_acc(i, j) =
new_offsets_acc(j, idx0_next) - new_offsets_acc(j, idx0);
});
auto tot_sizes_cpu = tot_sizes.To(GetCpuContext());
auto tot_sizes_cpu_acc = tot_sizes_cpu.Accessor();
out->resize(out_size);
if (split_map != nullptr) split_map->resize(out_size);
// We cannot avoid this for-loop over dim0, as we want to allocate memory
// separately; we may consider using a ThreadPool later.
for (int32_t i = 0; i < out_size; ++i) {
out->at(i) = RaggedShapeFromTotSizes(c,
num_axes, tot_sizes_cpu.Row(i).Data());
if (split_map != nullptr) {
split_map->at(i) =
Array1<int32_t>(c, tot_sizes_cpu_acc(i, num_axes - 1));
};
}
// Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1).
RowSplitsAccessor<5> old_row_splits_acc(src);
RowIdsAccessor<5> old_row_ids_acc(src);
auto old_offsets_acc = old_offsets.Accessor();
// axes_elems contains the number of elements on each axis before splitting
// into different RaggedShapes; it should equal the column sums of `tot_sizes`
// above.
Array1<int32_t> axes_elems =
Array1<int32_t>(new_offsets.Col(tot_elems)).To(GetCpuContext());
for (int32_t axis = 0; axis < num_axes; axis++) {
// Contains the RowSplits & RowIds pointers for the current layer; it has
// dimension out_size * 2, and the layout is splits_pointer0, ids_pointer0,
// splits_pointer1, ids_pointer1, ...
Array1<int32_t *> splits_ids_ptr(GetCpuContext(), out_size * 2);
int32_t **splits_ids_ptr_data = splits_ids_ptr.Data();
// Contains the pointers for split_map
Array1<int32_t *> split_map_ptr;
int32_t **split_map_ptr_data = nullptr;
if (axis == num_axes - 1 && split_map != nullptr) {
split_map_ptr = Array1<int32_t *>(GetCpuContext(), out_size);
split_map_ptr_data = split_map_ptr.Data();
}
for (int32_t i = 0; i < out_size; ++i) {
splits_ids_ptr_data[2 * i] = axis == num_axes - 1 ? nullptr :
out->at(i).RowSplits(axis + 1).Data();
splits_ids_ptr_data[2 * i + 1] =
axis == 0 ? nullptr : out->at(i).RowIds(axis).Data();
if (axis == num_axes - 1 && split_map != nullptr) {
split_map_ptr_data[i] = split_map->at(i).Data();
}
}
// transfer to GPU if we're using a GPU
splits_ids_ptr = splits_ids_ptr.To(c);
splits_ids_ptr_data = splits_ids_ptr.Data();
// set row split1
if (axis == 0) {
K2_EVAL(c, tot_elems, lambda_set_row_split1, (int32_t idx01) {
int32_t index_idx0 = indexes_row_ids1_data[idx01],
idx0x = indexes_row_split1_data[index_idx0];
splits_ids_ptr_data[2 * index_idx0][idx01 - idx0x]
= new_offsets_acc(axis + 1, idx01) -
new_offsets_acc(axis + 1, idx0x);
// Set the last elements of row_splits1 of each output shape
if (idx01 == tot_elems - 1 ||
index_idx0 != indexes_row_ids1_data[idx01 + 1]) {
splits_ids_ptr_data[2 * index_idx0][idx01 - idx0x + 1]
= new_offsets_acc(axis + 1, idx01 + 1) -
new_offsets_acc(axis + 1, idx0x);
}
});
continue;
}
// set last element of each row_splits
// TODO: Integrate this kernel into the kernel below.
if (axis < num_axes - 1) {
K2_EVAL(c, out_size, lambda_set_last_row_splits, (int32_t idx0) {
int32_t idx0x = indexes_row_split1_data[idx0],
idx0x_next = indexes_row_split1_data[idx0 + 1],
value = new_offsets_acc(axis + 1, idx0x_next) -
new_offsets_acc(axis + 1, idx0x),
pos = tot_sizes_acc(idx0, axis);
splits_ids_ptr_data[2 * idx0][pos] = value;
});
}
if (axis == num_axes - 1 && split_map != nullptr) {
split_map_ptr = split_map_ptr.To(c);
split_map_ptr_data = split_map_ptr.Data();
}
int32_t num_elems = axes_elems[axis];
// composed_row_ids maps current idx to idx01 of indexes
Array1<int32_t> composed_row_ids(c, num_elems);
RowSplitsToRowIds(new_offsets.Row(axis), &composed_row_ids);
const int32_t *composed_row_ids_data = composed_row_ids.Data();
K2_EVAL(c, num_elems, lambda_set_row_splits_and_ids, (int32_t i) {
// tot_elems = indexes.NumElements(), so tot_idx0 can be interpreted as
// index_idx01
int32_t tot_idx0 = composed_row_ids_data[i],
index_idx0 = indexes_row_ids1_data[tot_idx0],
index_idx0x = indexes_row_split1_data[index_idx0],
begin_base = new_offsets_acc(axis, index_idx0x),
begin = new_offsets_acc(axis, tot_idx0),
this_idx0 = i - begin,
this_idx01 = i - begin_base;
K2_CHECK_GE(this_idx0, 0);
K2_CHECK_GE(this_idx01, 0);
// "prev" means for axis - 1
int32_t new_prev_offset = new_offsets_acc(axis - 1, tot_idx0),
old_prev_offset = old_offsets_acc(axis - 1, tot_idx0),
old_offset = old_offsets_acc(axis, tot_idx0),
old_idx = old_offset + this_idx0;
if (split_map != nullptr && axis == num_axes - 1)
split_map_ptr_data[index_idx0][this_idx01] = old_idx;
// set row ids
const int32_t *this_old_row_ids = old_row_ids_acc(axis - 1);
int32_t old_row_id = this_old_row_ids[old_idx],
new_row_id = old_row_id + new_prev_offset - old_prev_offset,
new_pre_offset_idx0x = new_offsets_acc(axis - 1, index_idx0x);
splits_ids_ptr_data[2 * index_idx0 + 1][this_idx01] =
new_row_id - new_pre_offset_idx0x;
// set row splits
if (axis + 1 < num_axes) {
int32_t new_next_offset = new_offsets_acc(axis + 1, tot_idx0),
old_next_offset = old_offsets_acc(axis + 1, tot_idx0),
next_offset_diff = new_next_offset - old_next_offset;
const int32_t *old_row_splits_data = old_row_splits_acc(axis);
int32_t row_split_value =
next_offset_diff + old_row_splits_data[old_idx],
new_next_offset_idx0x = new_offsets_acc(axis + 1, index_idx0x);
splits_ids_ptr_data[2 * index_idx0][this_idx01]
= row_split_value - new_next_offset_idx0x;
}
});
}
}
void Unstack(RaggedShape &src, int32_t axis, bool pad_right,
std::vector<RaggedShape> *out,
std::vector<Array1<int32_t>> *split_map) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
if (axis == 0) {
if (src.NumAxes() == 2) {
auto new_src = ComposeRaggedShapes(
TrivialShape(c, src.TotSize(0)), src);
return Unstack(new_src, 1, pad_right, out, split_map);
}
auto indexes = Ragged<int32_t>(RegularRaggedShape(c, src.Dim0(), 1),
Arange(c, 0, src.Dim0()));
SelectAxis0(src, indexes, out, split_map);
for (size_t i = 0; i < out->size(); ++i) {
out->at(i) = RemoveAxis(out->at(i), 0);
}
} else {
int32_t tot_size_axis_minus1 = src.TotSize(axis - 1),
tot_size_axis = src.TotSize(axis);
const int32_t *row_splits_axis = src.RowSplits(axis).Data(),
*row_ids_axis = src.RowIds(axis).Data();
// Each sublist contains the elements of axis `axis`; the unstack operation
// will split the elements of each sublist into different RaggedShapes, so
// the number of output RaggedShapes equals the size of the largest sublist.
int32_t num_out = src.MaxSize(axis);
out->resize(num_out);
if (split_map != nullptr) split_map->resize(num_out);
// We will select the elements of axis `axis` on each sublist; the number
// of sublists equals `src.TotSize(axis - 1)`.
// Initialize with -1 here, because not all the sublists have the same size;
// the -1s mean that we don't select anything at those positions.
Array1<int32_t> indexes(c, num_out * tot_size_axis_minus1, -1);
int32_t *indexes_data = indexes.Data();
// Decide the elements of axis `axis` will go to which output RaggedShape
K2_EVAL(c, tot_size_axis, lambda_set_indexes, (int32_t idx01) {
int32_t idx0 = row_ids_axis[idx01],
idx0x = row_splits_axis[idx0],
idx1 = idx01 - idx0x,
idx_row = idx1;
if (!pad_right) {
int32_t idx0x_next = row_splits_axis[idx0 + 1],
num_elems = idx0x_next - idx0x;
idx_row = num_out - num_elems + idx1;
}
indexes_data[idx_row * tot_size_axis_minus1 + idx0] = idx01;
});
// To make `DecomposeRaggedShape` work, we add a RegularRaggedShape
// layer after axis `axis` if axis equals `src.NumAxes() - 1`.
// Of course, we have to remove the added layer at the end.
bool remove_last_axis = false;
if (axis == src.NumAxes() - 1) {
src = ComposeRaggedShapes(src,
RegularRaggedShape(c, src.NumElements(), 1));
remove_last_axis = true;
}
RaggedShape top, bottom;
DecomposeRaggedShape(src, axis, &top, &bottom);
// Unstack will remove the current axis (the last axis of `top` after
// decomposing on axis); to make `RemoveAxis` work, we add a TrivialShape
// layer before axis 0, and finally we will remove the added layer.
bool remove_axis0 = false;
if (top.NumAxes() == 2) {
top = ComposeRaggedShapes(
TrivialShape(c, top.TotSize(0)), top);
remove_axis0 = true;
}
top = RemoveAxis(top, top.NumAxes() - 1);
auto ragged_indexes = Ragged<int32_t>(RegularRaggedShape(c,
num_out, tot_size_axis_minus1), indexes);
// Select elements according to indexes into corresponding RaggedShape
SelectAxis0(bottom, ragged_indexes, out, split_map);
for (int32_t i = 0; i < num_out; ++i) {
out->at(i) = ComposeRaggedShapes(top, out->at(i));
if (remove_axis0 && !remove_last_axis)
out->at(i) = RemoveAxis(out->at(i), 0);
if (remove_last_axis) {
out->at(i) = RemoveEmptyLists(out->at(i), out->at(i).NumAxes() - 2);
out->at(i) = RemoveAxis(out->at(i), out->at(i).NumAxes() - 1);
}
}
}
}
void Unstack(RaggedShape &src, int32_t axis, std::vector<RaggedShape> *out,
std::vector<Array1<int32_t>> *split_map /*= nullptr*/) {
Unstack(src, axis, true/*pad_right*/, out, split_map);
}
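// Illustrative sketch of Unstack on axis 0 (added for exposition; assumes the
// string-form constructor used in k2's tests). Unstack(axis=0) is the inverse
// of Stack(axis=0):
//
//   RaggedShape s("[ [ [ x x ] [ x ] ] [ [ x x x ] ] ]");
//   std::vector<RaggedShape> out;
//   Unstack(s, 0 /*axis*/, &out);
//   // out[0] is [ [ x x ] [ x ] ] and out[1] is [ [ x x x ] ].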
RaggedShape Merge(int32_t num_srcs, RaggedShape **src,
const Array1<uint32_t> &merge_map,
Array1<uint32_t> *merge_map_out) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(num_srcs > 0);
int32_t num_layers = src[0]->NumAxes() - 1;
std::vector<RaggedShapeLayer> ans_layers(num_layers);
// Note: this is a shallow copy.
Array1<uint32_t> merge_map_local = merge_map;
for (int32_t l = 0; l < num_layers; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m =
(l + 1 == num_layers ? merge_map_out : &merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
NVTX_RANGE(K2_FUNC);
// row_splits = [ 0, num_elems ]; row_ids = [ 0, 0, ..., 0 ] (num_elems zeros).
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
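// For exposition: TrivialShape(c, 3) is the shape [ [ x x x ] ], i.e. a single
// row containing 3 elements (row_splits == [ 0 3 ], row_ids == [ 0 0 0 ]).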
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
NVTX_RANGE(K2_FUNC);
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
K2_EVAL2(
c, dim0, dim1, lambda_set_row_ids,
(int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; });
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
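// For exposition: RegularRaggedShape(c, 2, 3) is [ [ x x x ] [ x x x ] ],
// i.e. dim0 rows each containing dim1 elements
// (row_splits == [ 0 3 6 ], row_ids == [ 0 0 0 1 1 1 ]).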
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
/*static*/ Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
#ifndef _MSC_VER
/*static*/ Array1<int32_t> GetTransposeReorderingThreeAxesCuda(
Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// at this point, a_idx012 and b_idx012 belong to the same column;
// then we sort by its row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim() - 1, // num_segments
lambda_comp, *mgpu_context));
return ans;
}
#endif
/*
// Checks the result of GetTransposeReordering() in debug mode and dies if it is wrong.
static void CheckGetTransposeReordering(Ragged<int32_t> &src,
Array1<int32_t> &ans) {
if (!internal::kDisableDebug && !internal::DisableChecks()) {
K2_CHECK(IsPermutation(ans));
K2_CHECK(IsMonotonic(src.values[ans]));
}
}*/
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &context = src.Context();
if (src.NumAxes() < 2 || src.values.Dim() == 0) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
#ifdef _MSC_VER
// See https://github.com/k2-fsa/k2/pull/753
// and
// https://github.com/k2-fsa/k2/pull/571
int32_t num_buckets = num_cols;
int32_t num_elements = src.values.Dim();
int32_t log_buckets = static_cast<int32_t>(ceilf(log2f(num_buckets)));
Array1<int32_t> ans = Range(context, num_elements, 0);
cudaStream_t stream = context->GetCudaStream();
size_t temp_storage_bytes = 0;
K2_CUDA_SAFE_CALL(cub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes, src.values.Data(),
static_cast<int32_t *>(nullptr), ans.Data(), ans.Data(), num_elements, 0,
log_buckets, stream));
Array1<int8_t> d_temp_storage(
context, temp_storage_bytes + num_elements * sizeof(int32_t));
K2_CUDA_SAFE_CALL(cub::DeviceRadixSort::SortPairs(
d_temp_storage.Data() + sizeof(int32_t) * num_elements,
temp_storage_bytes, src.values.Data(),
reinterpret_cast<int32_t *>(d_temp_storage.Data()), ans.Data(),
ans.Data(), num_elements, 0, log_buckets, stream));
return ans;
#else
(void)GetTransposeReorderingThreeAxesCuda; // remove compiler warnings
#if __CUDACC_VER_MAJOR__ > 10 || \
(__CUDACC_VER_MAJOR__ == 10 && \
(__CUDACC_VER_MINOR__ > 1 || \
(__CUDACC_VER_MINOR__ == 1 && __CUDACC_VER_BUILD__ > 105)))
// Enable it only for NVCC > 10.1.105
//
// Refer to https://github.com/LLNL/axom/issues/88
// NVCC 10.1.105 has a known issue for cub::DeviceRadixSort
int32_t num_buckets = num_cols;
int32_t num_elements = src.values.Dim();
int32_t log_buckets = static_cast<int32_t>(ceilf(log2f(num_buckets)));
Array1<int32_t> order = Range(context, num_elements, 0);
Array1<int32_t> src_tmp_out(context, num_elements);
Array1<int32_t> ans(context, num_elements);
cudaStream_t stream = context->GetCudaStream();
size_t temp_storage_bytes = 0;
K2_CUDA_SAFE_CALL(cub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes, src.values.Data(), src_tmp_out.Data(),
order.Data(), ans.Data(), num_elements, 0, log_buckets, stream));
Array1<int8_t> d_temp_storage(context, temp_storage_bytes);
K2_CUDA_SAFE_CALL(cub::DeviceRadixSort::SortPairs(
d_temp_storage.Data(), temp_storage_bytes, src.values.Data(),
src_tmp_out.Data(), order.Data(), ans.Data(), num_elements, 0,
log_buckets, stream));
// CheckGetTransposeReordering(src, ans);
return ans;
#else // __CUDACC_VER_MAJOR__
if (src.NumAxes() == 3) {
Array1<int32_t> ans = GetTransposeReorderingThreeAxesCuda(src, num_cols);
// CheckGetTransposeReordering(src, ans);
return ans;
}
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
// CheckGetTransposeReordering(src, ans);
return ans;
#endif
#endif // _MSC_VER
}
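// Worked example (added for exposition; assumes the string-form Ragged
// constructor used in k2's tests). `src` holds the column index of each
// nonzero of a row-major sparse matrix, row by row; the result orders the
// element indexes by (column, row), i.e. the order of the transposed matrix:
//
//   Ragged<int32_t> src("[ [ 0 2 ] [ 1 ] [ 0 1 ] ]");  // columns; num_cols=3
//   Array1<int32_t> order = GetTransposeReordering(src, 3);
//   // order is [ 0 3 2 4 1 ]:
//   //   column 0 -> elements 0 (row 0) and 3 (row 2),
//   //   column 1 -> elements 2 (row 1) and 4 (row 2),
//   //   column 2 -> element 1 (row 0).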
RaggedShape ChangeSublistSize(const RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void {
row_splits_data[idx0] =
src_row_splits_data[idx0] + size_delta * idx0;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
// it's only necessary to guard the next statement with an 'if'
// because size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
});
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
K2_EVAL(
c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void {
int32_t idx0 = i / size_delta, n = i % size_delta,
next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
});
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
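// For exposition: ChangeSublistSize operates on the last axis; e.g. with
// src == [ [ x x ] [ x ] ] and size_delta == 1 the result is
// [ [ x x x ] [ x x ] ], and with size_delta == -1 it is [ [ x ] [ ] ]
// (assuming size_delta does not make any row size negative).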
// TODO(dan): this could definitely be made more efficient.
RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1);
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data();
K2_EVAL(
c, num_rows, lambda_set_row_sizes, (int32_t idx0)->void {
int32_t orig_size =
src_row_splits_data[idx0 + 1] - src_row_splits_data[idx0],
size;
if (orig_size == 0 || orig_size + size_delta <= 0)
size = 0;
else
size = orig_size + size_delta;
row_splits_data[idx0] = size;
});
ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits);
ans_axes.back().row_ids =
Array1<int32_t>(c, ans_axes.back().row_splits.Back());
RowSplitsToRowIds(ans_axes.back().row_splits, &ans_axes.back().row_ids);
ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim();
return RaggedShape(ans_axes);
}
RaggedShape Prefix(RaggedShape &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
int32_t dim0 = src.Dim0();
K2_CHECK(n >= 0 && n <= dim0);
src.Populate();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = n;
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1);
// notice here we may do a memory copy from GPU to CPU.
row_end = axes_in[axis].row_splits[row_end];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
return RaggedShape(axes_out);
}
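// For exposition: Prefix keeps the first n top-level lists; e.g. with
// src == [ [ x x ] [ x ] [ x x x ] ], Prefix(src, 2) is [ [ x x ] [ x ] ].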
std::vector<RaggedShape> GetPrefixes(RaggedShape &src,
const std::vector<int32_t> &sizes) {
NVTX_RANGE(K2_FUNC);
src.Populate();
int32_t dim0 = src.Dim0();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
ContextPtr &c = src.Context();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
// get those row_end elements at each axis.
int32_t ans_size = static_cast<int32_t>(sizes.size());
Array1<int32_t> row_ends(c, num_axes * ans_size);
Array1<int32_t> sizes_array(GetCpuContext(), sizes);
Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
indexes.CopyFrom(sizes_array);
for (int32_t axis = 1; axis < num_axes; ++axis) {
Array1<int32_t> curr_axis_row_ends =
row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
indexes = curr_axis_row_ends;
}
row_ends = row_ends.To(GetCpuContext());
std::vector<RaggedShape> ans(ans_size);
for (int32_t i = 0; i != ans_size; ++i) {
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = row_ends[i];
K2_CHECK(row_end >= 0 && row_end <= dim0);
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits =
axes_in[axis].row_splits.Arange(0, row_end + 1);
row_end = row_ends[i + (axis + 1) * ans_size];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
ans[i] = RaggedShape(axes_out, false);
}
return ans;
}
RaggedShape Arange(RaggedShape &src, int32_t axis, int32_t begin, int32_t end,
std::pair<int32_t, int32_t> *value_range /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK(axis >= 0 && axis < num_axes - 1);
K2_CHECK(begin >= 0 && begin <= end && end <= src.TotSize(axis));
if (begin == end) {
RaggedShape ans = EmptyRaggedShape(src.Context(), num_axes - axis);
// as begin == end, the user always gets empty values when doing
// `src.values.Arange(begin, end)`
if (value_range != nullptr) *value_range = std::make_pair(begin, end);
return ans;
}
src.Populate();
ContextPtr &c = src.Context();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
int32_t ans_num_axes = num_axes - axis;
// `-1` because Layers().size() is NumAxes() - 1
std::vector<RaggedShapeLayer> axes_out(ans_num_axes - 1);
// get those `row_begin` and `row_end` indexes for all axes in a kernel so we
// can do just one GPU to CPU memory transfer.
// the format of `indexes` is: row_begin_axis0, row_end_axis0,
// row_begin_axis1, row_end_axis1, etc.; axis0, axis1 here are the axes of ans.
Array1<int32_t> indexes(c, ans_num_axes * 2);
int32_t *indexes_data = indexes.Data();
RowSplitsAccessor<5> src_row_splits_acc(src);
K2_EVAL(
c, 1, lambda_set_indexes, (int32_t i)->void {
// we just start a kernel with only one element here.
K2_CHECK_EQ(i, 0);
int32_t row_begin = begin, row_end = end;
indexes_data[0] = row_begin, indexes_data[1] = row_end;
for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) {
row_begin = src_row_splits_acc(cur_axis)[row_begin];
row_end = src_row_splits_acc(cur_axis)[row_end];
int32_t indexes_pos = ((cur_axis - axis) + 1) * 2;
indexes_data[indexes_pos] = row_begin;
indexes_data[indexes_pos + 1] = row_end;
}
});
indexes = indexes.To(GetCpuContext());
int32_t row_begin = indexes[0], row_end = indexes[1];
for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) {
axes_out[cur_axis - axis].row_splits =
axes_in[cur_axis].row_splits.Arange(row_begin, row_end + 1);
int32_t row_id = row_begin;
int32_t indexes_pos = ((cur_axis - axis) + 1) * 2;
row_begin = indexes[indexes_pos];
row_end = indexes[indexes_pos + 1];
axes_out[cur_axis - axis].row_splits =
Minus(axes_out[cur_axis - axis].row_splits, row_begin);
axes_out[cur_axis - axis].row_ids =
axes_in[cur_axis].row_ids.Arange(row_begin, row_end);
axes_out[cur_axis - axis].row_ids =
Minus(axes_out[cur_axis - axis].row_ids, row_id);
axes_out[cur_axis - axis].cached_tot_size = row_end - row_begin;
}
if (value_range != nullptr) *value_range = std::make_pair(row_begin, row_end);
return RaggedShape(axes_out);
}
Ragged<int32_t> AddSuffixToRagged(const Ragged<int32_t> &src,
const Array1<int32_t> &suffix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(suffix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + suffix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*suffix_data = suffix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0 + 1] - 1) {
// idx01 points to the last element of this row; copy from suffix
dst_values_data[idx01] = suffix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01];
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
Ragged<int32_t> AddPrefixToRagged(const Ragged<int32_t> &src,
const Array1<int32_t> &prefix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(prefix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + prefix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*prefix_data = prefix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0]) {
// idx01 points to the first element of this row; copy from prefix
dst_values_data[idx01] = prefix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01] - 1;
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
RaggedShape SubsetRaggedShape(RaggedShape &src, Renumbering &renumbering,
int32_t axis, Array1<int32_t> *elems_new2old) {
NVTX_RANGE(K2_FUNC);
axis = axis < 0 ? src.NumAxes() + axis : axis;
K2_CHECK_EQ(renumbering.NumOldElems(), src.TotSize(axis));
return Index(src, axis, renumbering.New2Old(), elems_new2old);
}
RaggedShape SubsetRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeLayer> axes = src.Layers();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeLayer &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
// The variable names below use this 3-axis assumption but the
// code will work for a greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2,
(int32_t new_idx01)->void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
// row_splits2 maps from idx01 -> idx012. Map both indexes.
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
});
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
for (int32_t a = 1; a + 1 < num_axes; ++a) axes[a] = axes[0];
return RaggedShape(axes);
}
Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = shape.Context();
Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1));
Array1<int32_t> index_map;
Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map);
return index_map;
}
RaggedShape GetLayer(const RaggedShape &src, int32_t layer) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(layer, 0);
K2_CHECK_LT(layer, src.NumAxes() - 1);
std::vector<RaggedShapeLayer> layers;
layers.push_back(src.Layers()[layer]);
bool check = false;
return RaggedShape(layers, check);
}
void DecomposeRaggedShape(const RaggedShape &src, int32_t axis,
RaggedShape *top, RaggedShape *bottom) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, src.NumAxes() - 1);
const std::vector<RaggedShapeLayer> &src_layers = src.Layers();
std::vector<RaggedShapeLayer> top_layers(axis),
bottom_layers(src_layers.size() - axis);
int32_t src_size = static_cast<int32_t>(src_layers.size());
for (int32_t i = 0; i < axis; ++i) top_layers[i] = src_layers[i];
for (int32_t i = axis; i < src_size; ++i)
bottom_layers[i - axis] = src_layers[i];
*top = RaggedShape(top_layers);
*bottom = RaggedShape(bottom_layers);
}
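// Illustrative sketch of DecomposeRaggedShape (added for exposition; assumes
// the string-form constructor used in k2's tests). ComposeRaggedShapes(top,
// bottom) recovers the original shape:
//
//   RaggedShape s("[ [ [ x ] [ x x ] ] [ [ x x x ] ] ]");  // 3 axes
//   RaggedShape top, bottom;
//   DecomposeRaggedShape(s, 1 /*axis*/, &top, &bottom);
//   // top is [ [ x x ] [ x ] ]  (axes 0..1 of s),
//   // bottom is [ [ x ] [ x x ] [ x x x ] ]  (axes 1..2 of s).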
RaggedShape RemoveEmptyLists(RaggedShape &src_shape, int32_t axis,
Renumbering *renumbering_out) {
NVTX_RANGE(K2_FUNC);
if (axis == 0) {
return RemoveEmptyListsAxis0(src_shape, renumbering_out);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
Renumbering r_temp;
if (!renumbering_out) renumbering_out = &r_temp;
bottom_shape = RemoveEmptyListsAxis0(bottom_shape, renumbering_out);
top_shape = SubsetRaggedShape(top_shape, *renumbering_out);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveSomeEmptyLists(RaggedShape &src_shape, int32_t axis,
Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
if (axis == 0) {
return RenumberAxis0Simple(src_shape, renumbering);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
bottom_shape = RenumberAxis0Simple(bottom_shape, renumbering);
top_shape = SubsetRaggedShape(top_shape, renumbering);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveEmptyListsAxis0(RaggedShape &src_shape,
Renumbering *renumbering_out) {
NVTX_RANGE(K2_FUNC);
Renumbering r_temp;
if (!renumbering_out) renumbering_out = &r_temp;
ContextPtr &c = src_shape.Context();
int32_t num_lists = src_shape.Dim0();
*renumbering_out = Renumbering(c, num_lists);
const int32_t *row_splits_data = src_shape.RowSplits(1).Data();
char *keep_data = renumbering_out->Keep().Data();
K2_EVAL(
c, num_lists, lambda_set_keep, (int32_t i)->void {
keep_data[i] = (row_splits_data[i + 1] != row_splits_data[i]);
});
return RenumberAxis0Simple(src_shape, *renumbering_out);
}
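// For exposition: with src_shape == [ [ x x ] [ ] [ x ] ],
// RemoveEmptyListsAxis0 returns [ [ x x ] [ x ] ], and the renumbering keeps
// rows 0 and 2 of the input.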
RaggedShape RenumberAxis0Simple(RaggedShape &src_shape,
Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(renumbering.NumOldElems(), src_shape.Dim0());
ContextPtr c = src_shape.Context();
src_shape.RowIds(1); // make sure RowIds(1) is populated.
std::vector<RaggedShapeLayer> layers = src_shape.Layers();
int32_t num_layers = layers.size();
int32_t new_num_lists = renumbering.NumNewElems(),
num_elems = src_shape.TotSize(1); // unchanged old vs. new.
Array1<int32_t> new_row_splits(c, new_num_lists + 1),
new_row_ids = renumbering.Old2New()[src_shape.RowIds(1)];
int32_t *new_row_splits_data = new_row_splits.Data();
const int32_t *old_row_splits_data = src_shape.RowSplits(1).Data(),
*new2old_data = renumbering.New2Old().Data();
// set `new_row_splits_data`.
#ifndef NDEBUG
{
Array1<int32_t> is_ok(c, 1, 1);
int32_t *is_ok_data = is_ok.Data();
int32_t old_num_lists = src_shape.Dim0();
const int32_t *old2new_data = renumbering.Old2New().Data();
K2_EVAL(
c, old_num_lists, lambda_check_preconditions, (int32_t i)->void {
if (old2new_data[i + 1] == old2new_data[i]) { // This list not kept
if (old_row_splits_data[i + 1] != old_row_splits_data[i]) {
// this list was nonempty...
is_ok_data[0] = 0;
}
}
});
K2_CHECK_NE(is_ok[0], 0) << "RenumberAxis0Simple(): preconditions not met; "
"renumbering removes nonempty lists.";
}
#endif
K2_EVAL(
c, new_num_lists + 1, lambda_set_new_row_splits, (int32_t new_i)->void {
int32_t j;
if (new_i == new_num_lists) {
j = num_elems;
} else {
int32_t old_i = new2old_data[new_i];
j = old_row_splits_data[old_i];
}
new_row_splits_data[new_i] = j;
});
layers[0].row_splits = new_row_splits;
layers[0].row_ids = new_row_ids;
// no need to set its cached_tot_size; that didn't change.
return RaggedShape(layers);
}
RaggedShape CoveringShape(int32_t num_srcs, RaggedShape **srcs) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
if (num_srcs == 1) return *srcs[0];
K2_CHECK_EQ(srcs[0]->NumAxes(), 2);
int32_t dim0 = srcs[0]->Dim0();
ContextPtr &c = srcs[0]->Context();
for (int32_t i = 1; i != num_srcs; ++i) {
K2_CHECK_EQ(srcs[i]->NumAxes(), 2);
K2_CHECK_EQ(srcs[i]->Dim0(), dim0);
K2_CHECK(c->IsCompatible(*srcs[i]->Context()));
}
// get row splits of srcs
Array1<int32_t *> row_splits_ptrs(GetCpuContext(), num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[i] = srcs[i]->RowSplits(1).Data();
}
row_splits_ptrs = row_splits_ptrs.To(c);
int32_t **src_row_splits_ptr_data = row_splits_ptrs.Data();
RaggedShape shape = RegularRaggedShape(c, dim0, num_srcs);
Array1<int32_t> values(c, dim0 * num_srcs);
// elements in row i of `sublist_sizes` are the sizes of row i
// of src[0], src[1]...
Ragged<int32_t> sublist_sizes(shape, values);
int32_t *values_data = sublist_sizes.values.Data();
K2_EVAL2(
c, dim0, num_srcs, lambda_set_sublist_sizes,
(int32_t i, int32_t j)->void {
values_data[i * num_srcs + j] =
src_row_splits_ptr_data[j][i + 1] - src_row_splits_ptr_data[j][i];
});
Array1<int32_t> ans_row_splits(c, dim0 + 1);
Array1<int32_t> ans_row_sizes = ans_row_splits.Arange(0, dim0);
MaxPerSublist(sublist_sizes, 0, &ans_row_sizes);
ExclusiveSum(ans_row_sizes, &ans_row_splits);
return RaggedShape2(&ans_row_splits, nullptr, -1);
}
Array1<int32_t> CoveringShapeForwardMap(RaggedShape &src,
RaggedShape &covering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(covering.NumAxes(), 2);
K2_CHECK_EQ(src.Dim0(), covering.Dim0());
int32_t num_elems = covering.NumElements();
K2_CHECK_GE(num_elems, src.NumElements());
ContextPtr c = GetContext(src, covering);
Array1<int32_t> ans(c, num_elems);
int32_t *ans_data = ans.Data();
const int32_t *covering_row_splits_data = covering.RowSplits(1).Data(),
*covering_row_ids_data = covering.RowIds(1).Data(),
*src_row_splits_data = src.RowSplits(1).Data();
K2_EVAL(
c, num_elems, lambda_set_value, (int32_t covering_idx01)->void {
int32_t covering_idx0 = covering_row_ids_data[covering_idx01],
covering_idx0x = covering_row_splits_data[covering_idx0],
covering_idx1 = covering_idx01 - covering_idx0x;
// src and covering have the same dim0
int32_t src_idx0x = src_row_splits_data[covering_idx0],
src_cur_row_size =
src_row_splits_data[covering_idx0 + 1] - src_idx0x;
K2_DCHECK_GE(
covering_row_splits_data[covering_idx0 + 1] - covering_idx0x,
src_cur_row_size);
if (covering_idx1 >= src_cur_row_size)
ans_data[covering_idx01] = -1;
else
ans_data[covering_idx01] = src_idx0x + covering_idx1; // src_idx01
});
return ans;
}
void RaggedShapeAxis0Splitter::Init(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
int32_t num_layers = src.NumLayers(), num_layers_out = num_layers - 1,
dim0 = src.Dim0();
K2_CHECK_LE(num_layers_out, 4); // If this fails, add something to the 4s and
// 5s here and in the header.
K2_CHECK_GT(num_layers, 1);
ContextPtr c = src.Context();
composite_row_splits_ = Array2<int32_t>(c, num_layers + 1, dim0 + 1);
Array2Accessor<int32_t> composite_row_splits_acc =
composite_row_splits_.Accessor();
RowSplitsAccessor<5> src_row_splits_acc(src);
SmallVec<int32_t *, 5> row_splits_out_acc;
K2_CHECK(num_layers_out <= 5);
Array1<int32_t> garbage1(c, dim0 + dim0 + 1); // won't be read.
row_splits_out_acc.data[0] = garbage1.Data();
for (int32_t l = 0; l < num_layers_out; l++) {
row_splits_out_[l] = Array1<int32_t>(c, src.TotSize(l + 1) + dim0 + 1);
row_splits_out_acc.data[l + 1] = row_splits_out_[l].Data();
}
// set composite_row_splits_ and also those elements of
// the output row_splits which are bound to be zero.
K2_EVAL(
c, dim0 + 1, lambda_set_composite_row_splits, (int32_t i)->void {
int32_t cur_pos = i;
composite_row_splits_acc(0, i) = cur_pos;
for (int32_t l = 0; l < num_layers; l++) {
// The following statement sets the zero at the beginning of each
// row_splits, plus a final zero that we write to avoid an
// if-statement.
row_splits_out_acc.data[l][cur_pos + i] = 0;
cur_pos = src_row_splits_acc.ptrs[l][cur_pos];
composite_row_splits_acc(l + 1, i) = cur_pos;
}
});
composite_row_splits_cpu_ = composite_row_splits_.To(GetCpuContext());
// Right now to_idx0 maps from an idx0 to an idx0 (identity map); next time it
// will map from an idx01 to an idx0, then idx012 to idx0 (all w.r.t. src).
// It doesn't include the extra last element like a row_splits would; it's
// like a composite row_ids vector: row_ids1, row_ids12 and so on.
Array1<int32_t> to_idx0 = composite_row_splits_.Row(0).Arange(0, dim0);
for (int32_t layer = 0; layer < num_layers_out; layer++)
row_ids_out_[layer] = Array1<int32_t>(c, src.TotSize(layer + 2));
Array1<int32_t> garbage2(c,
src.TotSize(1)); // corresponds to row_ids_out_[-1].
for (int32_t layer = 0; layer <= num_layers_out; layer++) {
// num_elems is the number of elements we process in this kernel.
int32_t num_elems = src.TotSize(layer + 1);
// The names here are valid for layer == 1; this just happens to be useful
// for exposition.
const int32_t *src_row_ids2_data = src.RowIds(layer + 1).Data(),
*idx01_to_idx0_data = to_idx0.Data();
int32_t *row_ids1_out_data =
(layer == 0 ? garbage2.Data() : row_ids_out_[layer - 1].Data());
if (layer < num_layers_out) {
Array1<int32_t> to_idx0_next(c, num_elems);
int32_t *row_splits2_out_data = row_splits_out_[layer].Data(),
*idx012_to_idx0_data = to_idx0_next.Data();
const int32_t *src_row_splits3_data = src.RowSplits(layer + 2).Data();
// row_splits3 maps from idx012 -> idx012x.
// remember: the names are valid for layer == 1, just as an example.
K2_EVAL(
c, num_elems, lambda_set_row_splits_and_ids,
(int32_t src_idx012)->void {
int32_t src_idx01 = src_row_ids2_data[src_idx012],
src_idx012x_next = src_row_splits3_data[src_idx012 + 1],
src_idx0 = idx01_to_idx0_data[src_idx01];
idx012_to_idx0_data[src_idx012] = src_idx0; // <-- output here.
int32_t src_idx0x = composite_row_splits_acc(layer, src_idx0),
src_idx0xxx = composite_row_splits_acc(layer + 2, src_idx0),
src_idx1 = src_idx01 - src_idx0x,
src_idx12x_next = src_idx012x_next - src_idx0xxx,
out_idx0 = src_idx1, out_idx01x_next = src_idx12x_next;
row_ids1_out_data[src_idx012] = out_idx0;
// below, the "+1" is because each element handles the next one
// within this output row_splits array, with the zeros (1st elem of
// each output row_splits array) handled by
// lambda_set_composite_row_splits. The "+ idx0" is to make room
// for the extra final element of all the previous row_splits
// arrays.
row_splits2_out_data[src_idx012 + 1 + src_idx0] = out_idx01x_next;
});
to_idx0 = to_idx0_next;
} else {
// The next code is a subset of the other branch.
K2_EVAL(
c, num_elems, lambda_set_row_ids, (int32_t src_idx012)->void {
int32_t src_idx01 = src_row_ids2_data[src_idx012],
idx0 = idx01_to_idx0_data[src_idx01],
src_idx0x = composite_row_splits_acc(layer, idx0),
src_idx1 = src_idx01 - src_idx0x, out_idx0 = src_idx1;
row_ids1_out_data[src_idx012] = out_idx0;
});
}
}
}
RaggedShape RaggedShapeAxis0Splitter::GetElement(int32_t i,
int32_t *elem_offset) {
NVTX_RANGE(K2_FUNC);
int32_t num_layers_out = composite_row_splits_.Dim0() - 2;
std::vector<RaggedShapeLayer> out;
out.reserve(num_layers_out);
auto composite_row_splits_cpu_acc = composite_row_splits_cpu_.Accessor();
for (int32_t layer = 0; layer < num_layers_out; layer++) {
int32_t row_begin = composite_row_splits_cpu_acc(layer + 1, i),
row_end = composite_row_splits_cpu_acc(layer + 1, i + 1),
elem_begin = composite_row_splits_cpu_acc(layer + 2, i),
elem_end = composite_row_splits_cpu_acc(layer + 2, i + 1),
num_elems = elem_end - elem_begin;
if (layer + 1 == num_layers_out && elem_offset != nullptr)
*elem_offset = elem_begin;
// the "+ i" is to account for the extra final elements of preceding
// row_splits vectors; the + 1 is for the final element of this one.
Array1<int32_t> splits = row_splits_out_[layer].Arange(row_begin + i,
row_end + i + 1),
ids = row_ids_out_[layer].Arange(elem_begin, elem_end);
out.emplace_back(RaggedShapeLayer{splits, ids, num_elems});
}
// TODO: when thoroughly debugged, maybe turn off validation?
return RaggedShape(out);
}
namespace hash_internal {
// Utilities for hashing strings (actually: sequences of int32_t).
/*
T can be int32_t or int64_t.
The following code shows what we are computing:
std::vector<int32_t> input;
T hash1 = 13, hash2 = 787;
for (size_t i = 0; i < input.size(); i++) {
hash1 = 31 * hash1 + input[i];
hash2 = 167 * hash2 + input[i];
}
hash = hash1 + 104729 * hash2;
I'm not sure these constants are ideal, but they are primes.
The actual calculation is a little different from the above because
of the need to do it via a reduction.
*/
template <typename T>
struct Hash {
T hash1;
T hash2;
T product1;
T product2;
// We'd like this to be a POD type, so we don't add the following constructor:
//   Hash(int32_t i): hash1(i), hash2(i), product1(31), product2(167) { }
// ... instead that initialization is done in HashInputIterator.
};
template <typename T>
struct HashInputIterator {
explicit __host__ __device__ __forceinline__ HashInputIterator(const int32_t *i) // NOLINT
: i_(i) {}
__device__ __forceinline__ Hash<T> operator[](int32_t idx) const {
return Hash<T>{i_[idx], i_[idx], 31, 167};
}
__device__ __forceinline__ HashInputIterator operator+(int32_t offset) const {
return HashInputIterator(i_ + offset);
}
const int32_t *i_;
};
template <typename T>
struct HashOutputIteratorDeref {  // this is what you get when you dereference
                                  // HashOutputIterator; it pretends to be a
                                  // Hash<T>, but assignment just writes the
                                  // final combined hash value through `t_`.
explicit __device__ __forceinline__ HashOutputIteratorDeref(T *t)
: t_(t) {}
__device__ __forceinline__ HashOutputIteratorDeref &operator=(
const Hash<T> &h) {
*t_ = h.hash1 + 13 * h.product1 + 104729 * h.hash2 +
(104729 * 787) * h.product2;
return *this;
}
T *t_;
};
template <typename T>
struct HashOutputIterator {  // writes the final combined hash for each segment.
explicit HashOutputIterator(T *t) : t_(t) {}
__device__ __forceinline__ HashOutputIteratorDeref<T> operator[](
int32_t idx) const {
return HashOutputIteratorDeref<T>(t_ + idx);
}
__device__ __forceinline__ HashOutputIterator operator+(size_t offset) {
return HashOutputIterator{t_ + offset};
}
T *t_;
};
template <typename T>
struct HashCombineOp {
__device__ __forceinline__ Hash<T> operator()(const Hash<T> &a,
const Hash<T> &b) const {
return Hash<T>{a.hash1 * b.product1 + b.hash1,
a.hash2 * b.product2 + b.hash2,
a.product1 * b.product1,
a.product2 * b.product2};
}
};
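// For intuition: HashCombineOp is associative, so a segmented reduction over the
// per-element values produced by HashInputIterator (Hash{x, x, 31, 167}) matches
// the sequential loop documented above. Combining two adjacent elements x0, x1 gives
//   { 31*x0 + x1, 167*x0 + x1, 31*31, 167*167 },
// and HashOutputIteratorDeref then restores the seeds:
//   hash1 + 13*product1 + 104729*(hash2 + 787*product2)
//     = (13*31^2 + 31*x0 + x1) + 104729*(787*167^2 + 167*x0 + x1),
// which is exactly hash1 + 104729*hash2 from the sequential version.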
} // namespace hash_internal
} // namespace k2
namespace std {
// The typedefs below are required by cub::DeviceSegmentedReduce::Reduce.
template <typename T>
struct iterator_traits<k2::hash_internal::HashInputIterator<T>> {
typedef k2::hash_internal::Hash<T> value_type;
};
template <typename T>
struct iterator_traits<k2::hash_internal::HashOutputIterator<T>> {
typedef k2::hash_internal::Hash<T> value_type;
typedef k2::hash_internal::HashOutputIteratorDeref<T> reference;
};
} // namespace std
namespace k2 {
template <typename T>
Array1<T> ComputeHash(Ragged<int32_t> &src) {
NVTX_RANGE(K2_FUNC);
int32_t last_axis = src.NumAxes() - 1;
const Array1<int32_t> &row_splits_array = src.RowSplits(last_axis);
int32_t num_rows = row_splits_array.Dim() - 1;
ContextPtr &c = src.Context();
Array1<T> ans(c, num_rows);
const int32_t *row_splits = row_splits_array.Data();
const int32_t *values_data = src.values.Data();
T *output_data = ans.Data();
if (c->GetDeviceType() == kCpu) {
int32_t j = row_splits[0];
for (int32_t i = 0; i < num_rows; ++i) {
T hash1 = 13, hash2 = 787;
int32_t row_end = row_splits[i + 1];
for (; j < row_end; ++j) {
T elem = values_data[j];
hash1 = 31 * hash1 + elem;
hash2 = 167 * hash2 + elem;
}
T hash = hash1 + 104729 * hash2;
output_data[i] = hash;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
hash_internal::HashInputIterator<T> input_iter(values_data);
hash_internal::HashOutputIterator<T> output_iter(output_data);
hash_internal::HashCombineOp<T> op;
hash_internal::Hash<T> initial_hash{ 0, 0, 1, 1 };
// This code is based on the example here:
// https://nvlabs.github.io/cub/structcub_1_1_device_segmented_reduce.html
std::size_t temp_storage_bytes = 0;
// the first call only determines the required temporary device storage size
K2_CUDA_SAFE_CALL(cub::DeviceSegmentedReduce::Reduce(
nullptr, temp_storage_bytes, input_iter, output_iter, num_rows,
row_splits, row_splits + 1, op, initial_hash, c->GetCudaStream()));
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CUDA_SAFE_CALL(cub::DeviceSegmentedReduce::Reduce(
d_temp_storage.Data(), temp_storage_bytes, input_iter, output_iter,
num_rows, row_splits, row_splits + 1, op, initial_hash,
c->GetCudaStream()));
}
return ans;
}
Ragged<int32_t> UniqueSequences(Ragged<int32_t> &src,
Ragged<int32_t> *num_repeats /*=nullptr*/,
Array1<int32_t> *new2old_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
if (src.NumAxes() == 2) {
// Put 'fake' layer at front, process, then remove.
Ragged<int32_t> temp = Unsqueeze(src, 0);
return UniqueSequences(temp, num_repeats, new2old_indexes).RemoveAxis(0);
}
Array1<int64_t> hashes = ComputeHash<int64_t>(src);
int32_t hashes_dim = hashes.Dim();
Array1<int32_t> order(c, hashes_dim);
// Using the layer before the last layer of `src` for the shape of
// `ragged_hashes`
Ragged<int64_t> ragged_hashes(GetLayer(src.shape, src.shape.NumLayers() - 2),
hashes);
SortSublists<int64_t, LessThan<int64_t> >(&ragged_hashes, &order);
Renumbering renumber_lists(c, hashes.Dim());
const int32_t *ragged_hashes_row_ids_data = ragged_hashes.RowIds(1).Data(),
*ragged_hashes_row_splits_data = ragged_hashes.RowSplits(1).Data();
const int64_t *ragged_hashes_data = ragged_hashes.values.Data();
char *keep_list_data = renumber_lists.Keep().Data();
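// Illustrative example: if the sorted hashes are [ [5, 5, 7], [2, 9, 9] ], the
// keep flags below come out as [1, 0, 1, 1, 1, 0] -- the first element of each
// row is always kept, and any element equal to its predecessor is dropped.
// Note that only the hashes are compared, so two distinct sequences that
// collide to the same hash would be merged.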
K2_EVAL(
c, hashes_dim, lambda_set_keep, (int32_t i)->void {
char keep;
if (i == ragged_hashes_row_splits_data[ragged_hashes_row_ids_data[i]]) {
// this is the first element of its sub-list in `ragged_hashes`.
keep = 1;
} else {
keep = (ragged_hashes_data[i] != ragged_hashes_data[i - 1]);
}
keep_list_data[i] = keep;
});
Array1<int32_t> new2old = renumber_lists.New2Old(),
new2unsorted = order[new2old];
Ragged<int32_t> ans = Index(src, src.NumAxes() - 2, new2unsorted);
if (num_repeats != nullptr) {
int32_t new2old_dim = new2old.Dim();
Array1<int32_t> num_repeats_array(c, new2old_dim);
const int32_t *new2old_data = new2old.Data();
int32_t *num_repeats_data = num_repeats_array.Data();
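// new2old[i] is the position, within the sorted-by-hash order, of the first
// occurrence of the i'th unique sequence, so the gap to the next first
// occurrence is the repeat count. Illustrative example: new2old = [0, 2, 5]
// with hashes_dim = 7 gives num_repeats = [2, 3, 2].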
K2_EVAL(
c, new2old_dim, set_num_repeats, (int32_t i)->void {
if (i < new2old_dim - 1) {
num_repeats_data[i] = new2old_data[i + 1] - new2old_data[i];
} else {
num_repeats_data[i] = hashes_dim - new2old_data[i];
}
});
*num_repeats = Ragged<int32_t>(GetLayer(ans.shape, ans.NumAxes() - 3),
num_repeats_array);
}
if (new2old_indexes != nullptr) {
*new2old_indexes = std::move(new2unsorted);
}
return ans;
}
// Instantiate template for int64 and int32.
template
Array1<int64_t> ComputeHash(Ragged<int32_t> &src);
template
Array1<int32_t> ComputeHash(Ragged<int32_t> &src);
} // namespace k2
|
f441e00fceeab3ecc65e774cff89e9dfa956e01e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "BucketBuilderGPU.h"
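// Builds a per-bucket histogram for one image: each thread looks up the bucket
// id of one SIFT point (for the given bucket group) and atomically increments
// that bucket's counter. The trailing __syncthreads() sits inside a divergent
// branch and is not needed for the atomic counting.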
__global__ void count_element_bucket_kernel(ImageData *imageData, int *cntEleInBucketDevice, int bucketGroupID, int imageIndex) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int bucketIndex = 0;
if (index < imageData->cntPoint) {
bucketIndex = imageData->deviceBucketIDSiftPoint[bucketGroupID * imageData->cntPoint + index];
atomicAdd(&(cntEleInBucketDevice[bucketIndex]), 1);
__syncthreads();
}
}
void count_element_bucket_GPU(ImageData *imageData, int *cntEleInBucketDevice, int siftCount, int bucketGroupID, int imageIndex) {
dim3 block(1024);
dim3 grid((siftCount + block.x - 1) / block.x);
//printf("sift point count: %d & sift = %d\n", grid.x, siftCount);
hipLaunchKernelGGL(( count_element_bucket_kernel) , dim3(grid), dim3(block) , 0, 0, imageData, cntEleInBucketDevice, bucketGroupID, imageIndex);
} | f441e00fceeab3ecc65e774cff89e9dfa956e01e.cu | #include <stdio.h>
#include "BucketBuilderGPU.h"
__global__ void count_element_bucket_kernel(ImageData *imageData, int *cntEleInBucketDevice, int bucketGroupID, int imageIndex) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int bucketIndex = 0;
if (index < imageData->cntPoint) {
bucketIndex = imageData->deviceBucketIDSiftPoint[bucketGroupID * imageData->cntPoint + index];
atomicAdd(&(cntEleInBucketDevice[bucketIndex]), 1);
__syncthreads();
}
}
void count_element_bucket_GPU(ImageData *imageData, int *cntEleInBucketDevice, int siftCount, int bucketGroupID, int imageIndex) {
dim3 block(1024);
dim3 grid((siftCount + block.x - 1) / block.x);
//printf("sift point count: %d & sift = %d\n", grid.x, siftCount);
count_element_bucket_kernel <<<grid, block >>> (imageData, cntEleInBucketDevice, bucketGroupID, imageIndex);
} |
71c6b89216e833f1167ef5c07de237885c9cbc82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
//#include "caffe/layers/focal_loss_layer.hpp"
#include "focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38; you can change it here
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LogOpGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
}
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
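// Sketch of the math behind the expression below: with
//   FL = -alpha * (1 - p_t)^gamma * log(p_t)  and  power_prob = alpha * (1 - p_t)^gamma,
// the quantity computed here is grad = -p_t * dFL/dp_t
//   = -gamma * power_prob / (1 - p_t) * log(p_t) * p_t + power_prob  (up to the eps clamp),
// so that with the softmax Jacobian dp_t/dx_j = p_t * (delta_tj - p_j) we get
//   dFL/dx_j = grad * (p_j - delta_tj),
// which is exactly what the loop below writes into bottom_diff.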
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j (i, j being the indices in the softmax derivative)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
} else {
// if i != j (i, j being the indices in the softmax derivative)
bottom_diff[ind_j] = grad * prob_data[ind_j];
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermediate values
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
| 71c6b89216e833f1167ef5c07de237885c9cbc82.cu | #include <algorithm>
#include <cfloat>
#include <vector>
//#include "caffe/layers/focal_loss_layer.hpp"
#include "focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38; you can change it here
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
LogOpGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
}
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j (i, j being the indices in the softmax derivative)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
} else {
// if i != j (i, j being the indices in the softmax derivative)
bottom_diff[ind_j] = grad * prob_data[ind_j];
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermediate values
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
a4736e2e9b6df6cebb69214eba80a486735c023f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_cooperative_groups.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <iostream>
using namespace cooperative_groups;
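// reduce_sum: shared-memory tree reduction over an arbitrary cooperative group.
// Each round halves the number of active lanes; `temp` must hold at least
// g.size() ints, and g.size() is assumed to be a power of two (true for the
// 256-thread blocks launched below).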
__device__ int reduce_sum(thread_group g, int* temp, int val) {
int lane = g.thread_rank();
for (int i = g.size() / 2; i > 0; i /= 2) {
temp[lane] = val;
g.sync();
if (lane < i) {
val += temp[lane + i];
}
g.sync();
}
return val;
}
__device__ int thread_sum(int* input, int n) {
int sum = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n / 4; i += blockDim.x * gridDim.x) {
int4 in = ((int4*)input)[i];
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
__global__ void sum_reduction(int* sum, int* input, int n) {
int my_sum = thread_sum(input, n);
extern __shared__ int temp[];
auto g = this_thread_block();
int block_sum = reduce_sum(g, temp, my_sum);
if (g.thread_rank() == 0) {
atomicAdd(sum, block_sum);
}
}
void initialize_vector(int* v, int n) {
for (int i = 0; i < n; i++) {
v[i] = 1;
}
}
int main() {
int n = 1 << 13;
size_t bytes = n * sizeof(int);
int* sum;
int* data;
hipMallocManaged(&sum, sizeof(int));
hipMallocManaged(&data, bytes);
initialize_vector(data, n);
int TB_SIZE = 256;
int GRID_SIZE = (n + TB_SIZE - 1) / TB_SIZE;
sum_reduction << <GRID_SIZE, TB_SIZE, n * sizeof(int) >> > (sum, data, n);
hipDeviceSynchronize();
assert(*sum == 8192);
printf("COMPLETED SUCCESSFULLY\n");
return 0;
} | a4736e2e9b6df6cebb69214eba80a486735c023f.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cooperative_groups.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <iostream>
using namespace cooperative_groups;
__device__ int reduce_sum(thread_group g, int* temp, int val) {
int lane = g.thread_rank();
for (int i = g.size() / 2; i > 0; i /= 2) {
temp[lane] = val;
g.sync();
if (lane < i) {
val += temp[lane + i];
}
g.sync();
}
return val;
}
__device__ int thread_sum(int* input, int n) {
int sum = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n / 4; i += blockDim.x * gridDim.x) {
int4 in = ((int4*)input)[i];
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
__global__ void sum_reduction(int* sum, int* input, int n) {
int my_sum = thread_sum(input, n);
extern __shared__ int temp[];
auto g = this_thread_block();
int block_sum = reduce_sum(g, temp, my_sum);
if (g.thread_rank() == 0) {
atomicAdd(sum, block_sum);
}
}
void initialize_vector(int* v, int n) {
for (int i = 0; i < n; i++) {
v[i] = 1;
}
}
int main() {
int n = 1 << 13;
size_t bytes = n * sizeof(int);
int* sum;
int* data;
cudaMallocManaged(&sum, sizeof(int));
cudaMallocManaged(&data, bytes);
initialize_vector(data, n);
int TB_SIZE = 256;
int GRID_SIZE = (n + TB_SIZE - 1) / TB_SIZE;
sum_reduction << <GRID_SIZE, TB_SIZE, n * sizeof(int) >> > (sum, data, n);
cudaDeviceSynchronize();
assert(*sum == 8192);
printf("COMPLETED SUCCESSFULLY\n");
return 0;
} |
4d6c99a3546b0014f8914a1c1bbaf1f87a46158f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP Reduction
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
// #define BLOCK_SIZE 5120 //@@ You can change this
#define SECTION_SIZE 2*BLOCK_SIZE
// Trace main::36 Device 1 name: Tesla C2050
// Trace main::37 Computational Capabilities: 2.0
// Trace main::38 Maximum global memory size: 2817982464
// Trace main::39 Maximum constant memory size: 65536
// Trace main::40 Maximum shared memory size per block: 49152
// Trace main::43 Maximum block dimensions: 1024 x 1024 x 64
// Trace main::46 Maximum grid dimensions: 65535 x 65535 x 65535
// Trace main::47 Warp size: 32
// 0 256
// 1 512
// 2 600
// 3 1024
// 4 9000
// 5 12670
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void total(float * input, float * output, int len) {
//@@ Load a segment of the input vector into shared memory
//@@ Traverse the reduction tree
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockIdx.x*blockDim.x;
partialSum[t] = start + t >= len ? 0.0 : input[start + t];
partialSum[blockDim.x+t] = start + blockDim.x+t >= len ? 0.0 : input[start + blockDim.x+t];
//printf("len %d start %d start + blockDim.x+t %d partialSum[t] %g partialSum[blockDim.x+t] %g\n", len, start, start + blockDim.x+t, partialSum[t], partialSum[blockDim.x+t]);
// partialSum[2*BLOCK_SIZE] is in shared memory
// for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2)
// {
// __syncthreads();
// if (t % stride == 0)
// partialSum[2*t] += partialSum[2*t+stride];
// }  // kept commented out: an uncommented brace here would end the kernel body early
__syncthreads();
for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
{
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
// printf("[%i, %i, %i]: %f + %f = %f\n", blockIdx.x, tx, stride, f1, f2, partialSum[tx]);
__syncthreads();
if (t == 0)
{
output[0] = partialSum[0];
}
}
int main(int argc, char ** argv) {
int ii;
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
hostOutput = (float*) malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numInputElements);
wbLog(TRACE, "The number of output elements in the input is ", numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void **) &deviceInput, numInputElements * sizeof(float));
hipMalloc((void **) &deviceOutput, numOutputElements * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid((numInputElements + BLOCK_SIZE - 1)/BLOCK_SIZE, 1, 1);
wbLog(TRACE, "GridSize ", (numInputElements + BLOCK_SIZE - 1)/BLOCK_SIZE);
wbLog(TRACE, "numOutputElements ", numOutputElements);
// dim3 DimGrid(ceil(numInputElements/BLOCK_SIZE), 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
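// Each iteration launches a single block that reduces one segment of up to
// 2*BLOCK_SIZE elements starting at deviceInput[2*BLOCK_SIZE*i]; the last
// argument is the number of valid elements left in that segment, so the kernel
// can zero-pad the tail.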
for (int i = 0; i < numOutputElements; ++i)
{
hipLaunchKernelGGL(( total), dim3(1),dim3(DimBlock), 0, 0,
// deviceInput,
&deviceInput[BLOCK_SIZE*i*2],
// deviceOutput,
&deviceOutput[i],
numInputElements - BLOCK_SIZE*(i+1)*2 > 0 ? BLOCK_SIZE*2 : numInputElements - BLOCK_SIZE*(i)*2
);
//wbLog(TRACE, "len ", numInputElements - BLOCK_SIZE*(i+1)*2);
//float temp;
//hipMemcpy(&temp, &deviceOutput[i], 1 * sizeof(float), hipMemcpyDeviceToHost);
//wbLog(TRACE, "output ", temp);
// hipDeviceSynchronize();
hipDeviceSynchronize();
}
// total<<<DimGrid,DimBlock>>>(deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
| 4d6c99a3546b0014f8914a1c1bbaf1f87a46158f.cu | // MP Reduction
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
// #define BLOCK_SIZE 5120 //@@ You can change this
#define SECTION_SIZE 2*BLOCK_SIZE
// Trace main::36 Device 1 name: Tesla C2050
// Trace main::37 Computational Capabilities: 2.0
// Trace main::38 Maximum global memory size: 2817982464
// Trace main::39 Maximum constant memory size: 65536
// Trace main::40 Maximum shared memory size per block: 49152
// Trace main::43 Maximum block dimensions: 1024 x 1024 x 64
// Trace main::46 Maximum grid dimensions: 65535 x 65535 x 65535
// Trace main::47 Warp size: 32
// 0 256
// 1 512
// 2 600
// 3 1024
// 4 9000
// 5 12670
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void total(float * input, float * output, int len) {
//@@ Load a segment of the input vector into shared memory
//@@ Traverse the reduction tree
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockIdx.x*blockDim.x;
partialSum[t] = start + t >= len ? 0.0 : input[start + t];
partialSum[blockDim.x+t] = start + blockDim.x+t >= len ? 0.0 : input[start + blockDim.x+t];
//printf("len %d start %d start + blockDim.x+t %d partialSum[t] %g partialSum[blockDim.x+t] %g\n", len, start, start + blockDim.x+t, partialSum[t], partialSum[blockDim.x+t]);
// partialSum[2*BLOCK_SIZE] is in shared memory
// for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2)
// {
// __syncthreads();
// if (t % stride == 0)
// partialSum[2*t] += partialSum[2*t+stride];
// }  // kept commented out: an uncommented brace here would end the kernel body early
__syncthreads();
for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
{
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
// printf("[%i, %i, %i]: %f + %f = %f\n", blockIdx.x, tx, stride, f1, f2, partialSum[tx]);
__syncthreads();
if (t == 0)
{
output[0] = partialSum[0];
}
}
int main(int argc, char ** argv) {
int ii;
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
hostOutput = (float*) malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numInputElements);
wbLog(TRACE, "The number of output elements in the input is ", numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void **) &deviceInput, numInputElements * sizeof(float));
cudaMalloc((void **) &deviceOutput, numOutputElements * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid((numInputElements + BLOCK_SIZE - 1)/BLOCK_SIZE, 1, 1);
wbLog(TRACE, "GridSize ", (numInputElements + BLOCK_SIZE - 1)/BLOCK_SIZE);
wbLog(TRACE, "numOutputElements ", numOutputElements);
// dim3 DimGrid(ceil(numInputElements/BLOCK_SIZE), 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
for (int i = 0; i < numOutputElements; ++i)
{
total<<<1,DimBlock>>>(
// deviceInput,
&deviceInput[BLOCK_SIZE*i*2],
// deviceOutput,
&deviceOutput[i],
numInputElements - BLOCK_SIZE*(i+1)*2 > 0 ? BLOCK_SIZE*2 : numInputElements - BLOCK_SIZE*(i)*2
);
//wbLog(TRACE, "len ", numInputElements - BLOCK_SIZE*(i+1)*2);
//float temp;
//cudaMemcpy(&temp, &deviceOutput[i], 1 * sizeof(float), cudaMemcpyDeviceToHost);
//wbLog(TRACE, "output ", temp);
// cudaDeviceSynchronize();
cudaThreadSynchronize();
}
// total<<<DimGrid,DimBlock>>>(deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
|
0beebe8967367f4166061552fe2fbb761bc19441.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "BackwardReLU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
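// Benchmark sweep harness: for every matrix size and block configuration it
// pads the launch grid so it covers a multiple of the block shape, performs one
// synchronized launch plus 10 warm-up launches, then times 1000 back-to-back
// launches (no device sync inside the timed region) and prints
// [time_us, (BLOCKX, BLOCKY), (XSIZE, YSIZE)].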
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Z = NULL;
hipMalloc(&Z, XSIZE*YSIZE);
float *dA = NULL;
hipMalloc(&dA, XSIZE*YSIZE);
int nRowsdZ = 1;
int nColsdZ = 1;
float *dZ = NULL;
hipMalloc(&dZ, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((BackwardReLU), dim3(gridBlock), dim3(threadBlock), 0, 0, Z, dA, nRowsdZ, nColsdZ, dZ);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((BackwardReLU), dim3(gridBlock), dim3(threadBlock), 0, 0, Z, dA, nRowsdZ, nColsdZ, dZ);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((BackwardReLU), dim3(gridBlock), dim3(threadBlock), 0, 0, Z, dA, nRowsdZ, nColsdZ, dZ);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0beebe8967367f4166061552fe2fbb761bc19441.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "BackwardReLU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Z = NULL;
cudaMalloc(&Z, XSIZE*YSIZE);
float *dA = NULL;
cudaMalloc(&dA, XSIZE*YSIZE);
int nRowsdZ = 1;
int nColsdZ = 1;
float *dZ = NULL;
cudaMalloc(&dZ, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
BackwardReLU<<<gridBlock,threadBlock>>>(Z,dA,nRowsdZ,nColsdZ,dZ);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
BackwardReLU<<<gridBlock,threadBlock>>>(Z,dA,nRowsdZ,nColsdZ,dZ);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
BackwardReLU<<<gridBlock,threadBlock>>>(Z,dA,nRowsdZ,nColsdZ,dZ);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cda3379d26d9c379e0a62611a7c5ecec9da50413.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TILE_SIZE 32
#define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
typedef struct {
real3 pos, force;
float radius, epsilon, padding;
} AtomData;
inline __device__ void
loadAtomData(AtomData &data, int atom, const real4 *__restrict__ posq, const float2 *__restrict__ radiusEpsilon) {
real4 atomPosq = posq[atom];
data.pos = make_real3(atomPosq.x, atomPosq.y, atomPosq.z);
float2 temp = radiusEpsilon[atom];
data.radius = temp.x;
data.epsilon = temp.y;
}
__device__ void
initParticleParameters(float radius, float epsilon, real &rmixo, real &rmixh, real &emixo, real &emixh) {
real sqrtEps = SQRT(epsilon);
real denominator = SQRT(EPSO) + sqrtEps;
emixo = 4 * EPSO * epsilon / (denominator * denominator);
denominator = SQRT(EPSH) + sqrtEps;
emixh = 4 * EPSH * epsilon / (denominator * denominator);
real radius2 = radius * radius;
real rmino2 = RMINO * RMINO;
rmixo = 2 * (rmino2 * RMINO + radius2 * radius) / (rmino2 + radius2);
real rminh2 = RMINH * RMINH;
rmixh = 2 * (rminh2 * RMINH + radius2 * radius) / (rminh2 + radius2);
}
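// Reading of the values above: emixo/emixh are the mixed well depths and
// rmixo/rmixh the mixed minimum-energy separations between this atom and a
// water oxygen / hydrogen probe; the expressions have the form of HHG-style
// combining rules (EPSO, EPSH, RMINO, RMINH are force-field constants assumed
// to be supplied as compile-time definitions elsewhere).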
__device__ real integralBeforeRMin(real eps, real r, real r2, real sk2,
real lik2, real lik3, real lik4,
real uik2, real uik3, real uik4) {
return -eps * (4 * M_PI / (48 * r) *
(3 * (lik4 - uik4) - 8 * r * (lik3 - uik3) + 6 * (r2 - sk2) * (lik2 - uik2)));
}
__device__ real integralBeforeRminDerivative(real ri, real eps, real rmin, real r, real r2, real r3, real sk, real sk2,
real lik, real lik2, real lik3, real uik, real uik2, real uik3) {
real dl;
if (ri > r - sk) {
dl = (-lik2 + 2 * r2 + 2 * sk2) * lik2;
} else {
dl = (-lik3 + 4 * lik2 * r - 6 * lik * r2 + 2 * lik * sk2 + 4 * r3 - 4 * r * sk2) * lik;
}
real du;
if (r + sk > rmin) {
du = -(-uik2 + 2 * r2 + 2 * sk2) * uik2;
} else {
du = -(-uik3 + 4 * uik2 * r - 6 * uik * r2 + 2 * uik * sk2 + 4 * r3 - 4 * r * sk2) * uik;
}
return -eps * M_PI * (dl + du) / (4 * r2);
}
__device__ real integratlAfterRmin(real eps, real rmin7, real r, real r2, real sk2,
real lik, real lik2, real lik3, real lik4, real lik5, real lik10,
real lik11, real lik12, real uik, real uik2, real uik3, real uik4,
real uik5, real uik10, real uik11, real uik12) {
real er7 = eps * rmin7;
real term = 4 * M_PI / (120 * r * lik5 * uik5)
* (15 * uik * lik * r * (uik4 - lik4)
- 10 * uik2 * lik2 * (uik3 - lik3)
+ 6 * (sk2 - r2) * (uik5 - lik5));
real term2 = 4 * M_PI / (2640 * r * lik12 * uik12)
* (120 * uik * lik * r * (uik11 - lik11)
- 66 * uik2 * lik2 * (uik10 - lik10)
+ 55 * (sk2 - r2) * (uik12 - lik12));
real idisp = -2 * er7 * term;
real irep = er7 * rmin7 * term2;
return irep + idisp;
}
__device__ real integratlAfterRminDerivative(real ri, real eps, real rmin, real rmin7, real rmax,
real r, real r2, real r3, real sk, real sk2, real lik,
real lik2, real lik3, real lik5, real lik6, real lik12,
real lik13, real uik, real uik2, real uik3, real uik6,
real uik13) {
real er7 = eps * rmin7;
real lowerTerm = lik2 * r + r3 - r * sk2;
real upperTerm = uik2 * r + r3 - r * sk2;
real dl;
if (ri > r - sk || rmax < rmin) {
dl = -(-5 * lik2 + 3 * r2 + 3 * sk2) / lik5;
} else {
dl = (5 * lik3 - 33 * lik * r2 - 3 * lik * sk2 + 15 * lowerTerm) / lik6;
}
real du = -(5 * uik3 - 33 * uik * r2 - 3 * uik * sk2 + 15 * upperTerm) / uik6;
real de = -2 * M_PI * er7 * (dl + du) / (15 * r2);
if (ri > r - sk || rmax < rmin) {
dl = -(-6 * lik2 + 5 * r2 + 5 * sk2) / lik12;
} else {
dl = (6 * lik3 - 125 * lik * r2 - 5 * lik * sk2 + 60 * lowerTerm) / lik13;
}
du = -(6 * uik3 - 125 * uik * r2 - 5 * uik * sk2 + 60 * upperTerm) / uik13;
de += M_PI * er7 * rmin7 * (dl + du) / (60 * r2);
return de;
}
__device__ real interact(real factor, real ri, real sk, real rmix, real emix,
real r, real r2, real r3, real3 &force) {
real sum = 0;
// Nothing to do if the integral begins beyond r + sk (i.e. atom k does not exclude solvent)
if (ri < r + sk) {
// Zero out the derivative contribution of atom k.
real de = 0;
real sk2 = sk * sk;
// Compute the maximum of 1) the beginning of the integral and 2) closest edge of atom K.
real iStart = ri > r - sk ? ri : r - sk;
// Use this as the lower limit for integrating the constant eps value below Rmin.
real lik = iStart;
// Interaction with water from lik to Rmin; nothing to do if the lower limit is greater than Rmin.
if (lik < rmix) {
real lik2 = lik * lik;
real lik3 = lik2 * lik;
real lik4 = lik3 * lik;
// Upper limit is the minimum of Rmin and the farthest edge of atom K.
real uik = r + sk < rmix ? r + sk : rmix;
real uik2 = uik * uik;
real uik3 = uik2 * uik;
real uik4 = uik3 * uik;
sum = integralBeforeRMin(emix, r, r2, sk2, lik2, lik3, lik4, uik2, uik3, uik4);
de = integralBeforeRminDerivative(ri, emix, rmix, r, r2, r3, sk, sk2, lik, lik2, lik3, uik, uik2, uik3);
}
// The upper limit of the variable part of Uwca is always the farthest edge of atom K.
real uik = r + sk;
// Interaction with water beyond Rmin, from lik to uik = r + sk.
if (uik > rmix) {
// Start the integral at the max of 1) iStart and 2) Rmin.
lik = iStart > rmix ? iStart : rmix;
real lik2 = lik * lik;
real lik3 = lik2 * lik;
real lik4 = lik3 * lik;
real lik5 = lik4 * lik;
real lik6 = lik5 * lik;
real lik10 = lik5 * lik5;
real lik11 = lik10 * lik;
real lik12 = lik11 * lik;
real uik2 = uik * uik;
real uik3 = uik2 * uik;
real uik4 = uik3 * uik;
real uik5 = uik4 * uik;
real uik10 = uik5 * uik5;
real uik11 = uik10 * uik;
real uik12 = uik11 * uik;
real rmix3 = rmix * rmix * rmix;
real rmix7 = rmix3 * rmix3 * rmix;
sum += integratlAfterRmin(emix, rmix7, r, r2, sk2, lik, lik2, lik3, lik4, lik5, lik10, lik11, lik12, uik,
uik2, uik3, uik4, uik5, uik10, uik11, uik12);
real lik13 = lik12 * lik;
real uik6 = uik5 * uik;
real uik13 = uik12 * uik;
de += integratlAfterRminDerivative(ri, emix, rmix, rmix7, iStart, r, r2, r3, sk, sk2, lik, lik2, lik3,
lik5, lik6, lik12, lik13, uik, uik2, uik3, uik6, uik13);
}
// Increment the individual dispersion gradient components.
de *= factor / r;
force.x += de;
force.y += de;
force.z += de;
}
return factor * sum;
}
__device__ void
computeOneInteraction(AtomData &atom1, AtomData &atom2, real rmixo, real rmixh, real emixo, real emixh, real3 &force,
real &energy) {
// get deltaR and r between 2 atoms
force = atom1.pos - atom2.pos;
real r2 = dot(force, force);
if (r2 <= 0) {
force = make_real3(0);
energy = 0;
return;
}
real xr = force.x;
real yr = force.y;
real zr = force.z;
real r = sqrt(r2);
real r3 = r2 * r;
real sk = atom2.radius * SHCTD;
// Start of integration of dispersion for atom i with water oxygen.
real riO = rmixo * 0.5f + DISPOFF;
real nO = 1.0f;
// Start of integration of dispersion for atom i with water hydrogen.
real riH = rmixh * 0.5f + DISPOFF;
real nH = 2.0f;
force = make_real3(0);
energy = interact(nO, riO, sk, rmixo, emixo, r, r2, r3, force) +
interact(nH, riH, sk, rmixh, emixh, r, r2, r3, force);
force.x *= AWATER * xr;
force.y *= AWATER * yr;
force.z *= AWATER * zr;
}
/**
* Compute WCA interaction.
*/
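// Work decomposition: the upper-triangular list of 32x32 atom tiles is split
// evenly across warps; (x, y) are recovered from the linear triangular tile
// index, each warp walks the 32 pairings of its tile, per-atom forces are
// accumulated into forceBuffers in 64-bit fixed point (value * 2^32) via
// atomicAdd, and each thread's accumulated energy is scaled by -AWATER at the end.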
extern "C" __global__ void
computeWCAForce(unsigned long long *__restrict__ forceBuffers, mixed *__restrict__ energyBuffer,
const real4 *__restrict__ posq, unsigned int startTileIndex, unsigned int numTileIndices,
const float2 *__restrict__ radiusEpsilon) {
unsigned int totalWarps = (blockDim.x * gridDim.x) / TILE_SIZE;
unsigned int warp = (blockIdx.x * blockDim.x + threadIdx.x) / TILE_SIZE;
const unsigned int numTiles = numTileIndices;
unsigned int pos = (unsigned int) (startTileIndex + warp * (long long) numTiles / totalWarps);
unsigned int end = (unsigned int) (startTileIndex + (warp + 1) * (long long) numTiles / totalWarps);
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
do {
// Extract the coordinates of this tile
const unsigned int tgx = threadIdx.x & (TILE_SIZE - 1);
const unsigned int tbx = threadIdx.x - tgx;
const unsigned int localGroupIndex = threadIdx.x / TILE_SIZE;
int x, y;
AtomData data;
if (pos < end) {
y = (int) floor(NUM_BLOCKS + 0.5f - SQRT((NUM_BLOCKS + 0.5f) * (NUM_BLOCKS + 0.5f) - 2 * pos));
x = (pos - y * NUM_BLOCKS + y * (y + 1) / 2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos - y * NUM_BLOCKS + y * (y + 1) / 2);
}
unsigned int atom1 = x * TILE_SIZE + tgx;
loadAtomData(data, atom1, posq, radiusEpsilon);
loadAtomData(localData[threadIdx.x], y * TILE_SIZE + tgx, posq, radiusEpsilon);
real emixo, emixh, rmixo, rmixh;
initParticleParameters(data.radius, data.epsilon, rmixo, rmixh, emixo, emixh);
data.force = make_real3(0);
localData[threadIdx.x].force = make_real3(0);
// Compute forces.
unsigned int tj = tgx;
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = y * TILE_SIZE + tj;
if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
real3 tempForce;
real tempEnergy;
computeOneInteraction(data, localData[tbx + tj], rmixo, rmixh, emixo, emixh, tempForce, tempEnergy);
data.force += tempForce;
localData[tbx + tj].force -= tempForce;
energy += (x == y ? 0.5f * tempEnergy : tempEnergy);
real emjxo, emjxh, rmjxo, rmjxh;
initParticleParameters(localData[tbx + tj].radius, localData[tbx + tj].epsilon, rmjxo, rmjxh, emjxo,
emjxh);
computeOneInteraction(localData[tbx + tj], data, rmjxo, rmjxh, emjxo, emjxh, tempForce, tempEnergy);
data.force -= tempForce;
localData[tbx + tj].force += tempForce;
energy += (x == y ? 0.5f * tempEnergy : tempEnergy);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
unsigned int offset = x * TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x * 0x100000000)));
atomicAdd(&forceBuffers[offset + PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (data.force.y * 0x100000000)));
atomicAdd(&forceBuffers[offset + 2 * PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (data.force.z * 0x100000000)));
if (x != y) {
offset = y * TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset],
static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x * 0x100000000)));
atomicAdd(&forceBuffers[offset + PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y * 0x100000000)));
atomicAdd(&forceBuffers[offset + 2 * PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z * 0x100000000)));
}
}
pos++;
} while (pos < end);
energyBuffer[blockIdx.x * blockDim.x + threadIdx.x] -= AWATER * energy;
}
| cda3379d26d9c379e0a62611a7c5ecec9da50413.cu | #define TILE_SIZE 32
#define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
typedef struct {
real3 pos, force;
float radius, epsilon, padding;
} AtomData;
inline __device__ void
loadAtomData(AtomData &data, int atom, const real4 *__restrict__ posq, const float2 *__restrict__ radiusEpsilon) {
real4 atomPosq = posq[atom];
data.pos = make_real3(atomPosq.x, atomPosq.y, atomPosq.z);
float2 temp = radiusEpsilon[atom];
data.radius = temp.x;
data.epsilon = temp.y;
}
__device__ void
initParticleParameters(float radius, float epsilon, real &rmixo, real &rmixh, real &emixo, real &emixh) {
real sqrtEps = SQRT(epsilon);
real denominator = SQRT(EPSO) + sqrtEps;
emixo = 4 * EPSO * epsilon / (denominator * denominator);
denominator = SQRT(EPSH) + sqrtEps;
emixh = 4 * EPSH * epsilon / (denominator * denominator);
real radius2 = radius * radius;
real rmino2 = RMINO * RMINO;
rmixo = 2 * (rmino2 * RMINO + radius2 * radius) / (rmino2 + radius2);
real rminh2 = RMINH * RMINH;
rmixh = 2 * (rminh2 * RMINH + radius2 * radius) / (rminh2 + radius2);
}
__device__ real integralBeforeRMin(real eps, real r, real r2, real sk2,
real lik2, real lik3, real lik4,
real uik2, real uik3, real uik4) {
return -eps * (4 * M_PI / (48 * r) *
(3 * (lik4 - uik4) - 8 * r * (lik3 - uik3) + 6 * (r2 - sk2) * (lik2 - uik2)));
}
__device__ real integralBeforeRminDerivative(real ri, real eps, real rmin, real r, real r2, real r3, real sk, real sk2,
real lik, real lik2, real lik3, real uik, real uik2, real uik3) {
real dl;
if (ri > r - sk) {
dl = (-lik2 + 2 * r2 + 2 * sk2) * lik2;
} else {
dl = (-lik3 + 4 * lik2 * r - 6 * lik * r2 + 2 * lik * sk2 + 4 * r3 - 4 * r * sk2) * lik;
}
real du;
if (r + sk > rmin) {
du = -(-uik2 + 2 * r2 + 2 * sk2) * uik2;
} else {
du = -(-uik3 + 4 * uik2 * r - 6 * uik * r2 + 2 * uik * sk2 + 4 * r3 - 4 * r * sk2) * uik;
}
return -eps * M_PI * (dl + du) / (4 * r2);
}
__device__ real integratlAfterRmin(real eps, real rmin7, real r, real r2, real sk2,
real lik, real lik2, real lik3, real lik4, real lik5, real lik10,
real lik11, real lik12, real uik, real uik2, real uik3, real uik4,
real uik5, real uik10, real uik11, real uik12) {
real er7 = eps * rmin7;
real term = 4 * M_PI / (120 * r * lik5 * uik5)
* (15 * uik * lik * r * (uik4 - lik4)
- 10 * uik2 * lik2 * (uik3 - lik3)
+ 6 * (sk2 - r2) * (uik5 - lik5));
real term2 = 4 * M_PI / (2640 * r * lik12 * uik12)
* (120 * uik * lik * r * (uik11 - lik11)
- 66 * uik2 * lik2 * (uik10 - lik10)
+ 55 * (sk2 - r2) * (uik12 - lik12));
real idisp = -2 * er7 * term;
real irep = er7 * rmin7 * term2;
return irep + idisp;
}
__device__ real integratlAfterRminDerivative(real ri, real eps, real rmin, real rmin7, real rmax,
real r, real r2, real r3, real sk, real sk2, real lik,
real lik2, real lik3, real lik5, real lik6, real lik12,
real lik13, real uik, real uik2, real uik3, real uik6,
real uik13) {
real er7 = eps * rmin7;
real lowerTerm = lik2 * r + r3 - r * sk2;
real upperTerm = uik2 * r + r3 - r * sk2;
real dl;
if (ri > r - sk || rmax < rmin) {
dl = -(-5 * lik2 + 3 * r2 + 3 * sk2) / lik5;
} else {
dl = (5 * lik3 - 33 * lik * r2 - 3 * lik * sk2 + 15 * lowerTerm) / lik6;
}
real du = -(5 * uik3 - 33 * uik * r2 - 3 * uik * sk2 + 15 * upperTerm) / uik6;
real de = -2 * M_PI * er7 * (dl + du) / (15 * r2);
if (ri > r - sk || rmax < rmin) {
dl = -(-6 * lik2 + 5 * r2 + 5 * sk2) / lik12;
} else {
dl = (6 * lik3 - 125 * lik * r2 - 5 * lik * sk2 + 60 * lowerTerm) / lik13;
}
du = -(6 * uik3 - 125 * uik * r2 - 5 * uik * sk2 + 60 * upperTerm) / uik13;
de += M_PI * er7 * rmin7 * (dl + du) / (60 * r2);
return de;
}
__device__ real interact(real factor, real ri, real sk, real rmix, real emix,
real r, real r2, real r3, real3 &force) {
real sum = 0;
// Nothing to do if the integral begins beyond r + sk (i.e. atom k does not exclude solvent)
if (ri < r + sk) {
// Zero out the derivative contribution of atom k.
real de = 0;
real sk2 = sk * sk;
// Compute the maximum of 1) the beginning of the integral and 2) closest edge of atom K.
real iStart = ri > r - sk ? ri : r - sk;
// Use this as the lower limit for integrating the constant eps value below Rmin.
real lik = iStart;
// Interaction with water from lik to Rmin; nothing to do if the lower limit is greater than Rmin.
if (lik < rmix) {
real lik2 = lik * lik;
real lik3 = lik2 * lik;
real lik4 = lik3 * lik;
// Upper limit is the minimum of Rmin and the farthest edge of atom K.
real uik = r + sk < rmix ? r + sk : rmix;
real uik2 = uik * uik;
real uik3 = uik2 * uik;
real uik4 = uik3 * uik;
sum = integralBeforeRMin(emix, r, r2, sk2, lik2, lik3, lik4, uik2, uik3, uik4);
de = integralBeforeRminDerivative(ri, emix, rmix, r, r2, r3, sk, sk2, lik, lik2, lik3, uik, uik2, uik3);
}
// The upper limit of the variable part of Uwca is always the farthest edge of atom K.
real uik = r + sk;
// Interaction with water beyond Rmin, from lik to uik = r + sk.
if (uik > rmix) {
// Start the integral at the max of 1) iStart and 2) Rmin.
lik = iStart > rmix ? iStart : rmix;
real lik2 = lik * lik;
real lik3 = lik2 * lik;
real lik4 = lik3 * lik;
real lik5 = lik4 * lik;
real lik6 = lik5 * lik;
real lik10 = lik5 * lik5;
real lik11 = lik10 * lik;
real lik12 = lik11 * lik;
real uik2 = uik * uik;
real uik3 = uik2 * uik;
real uik4 = uik3 * uik;
real uik5 = uik4 * uik;
real uik10 = uik5 * uik5;
real uik11 = uik10 * uik;
real uik12 = uik11 * uik;
real rmix3 = rmix * rmix * rmix;
real rmix7 = rmix3 * rmix3 * rmix;
sum += integratlAfterRmin(emix, rmix7, r, r2, sk2, lik, lik2, lik3, lik4, lik5, lik10, lik11, lik12, uik,
uik2, uik3, uik4, uik5, uik10, uik11, uik12);
real lik13 = lik12 * lik;
real uik6 = uik5 * uik;
real uik13 = uik12 * uik;
de += integratlAfterRminDerivative(ri, emix, rmix, rmix7, iStart, r, r2, r3, sk, sk2, lik, lik2, lik3,
lik5, lik6, lik12, lik13, uik, uik2, uik3, uik6, uik13);
}
// Increment the individual dispersion gradient components.
de *= factor / r;
force.x += de;
force.y += de;
force.z += de;
}
return factor * sum;
}
__device__ void
computeOneInteraction(AtomData &atom1, AtomData &atom2, real rmixo, real rmixh, real emixo, real emixh, real3 &force,
real &energy) {
// get deltaR and r between 2 atoms
force = atom1.pos - atom2.pos;
real r2 = dot(force, force);
if (r2 <= 0) {
force = make_real3(0);
energy = 0;
return;
}
real xr = force.x;
real yr = force.y;
real zr = force.z;
real r = sqrt(r2);
real r3 = r2 * r;
real sk = atom2.radius * SHCTD;
// Start of integration of dispersion for atom i with water oxygen.
real riO = rmixo * 0.5f + DISPOFF;
real nO = 1.0f;
// Start of integration of dispersion for atom i with water hydrogen.
real riH = rmixh * 0.5f + DISPOFF;
real nH = 2.0f;
force = make_real3(0);
energy = interact(nO, riO, sk, rmixo, emixo, r, r2, r3, force) +
interact(nH, riH, sk, rmixh, emixh, r, r2, r3, force);
force.x *= AWATER * xr;
force.y *= AWATER * yr;
force.z *= AWATER * zr;
}
/**
* Compute WCA interaction.
*/
extern "C" __global__ void
computeWCAForce(unsigned long long *__restrict__ forceBuffers, mixed *__restrict__ energyBuffer,
const real4 *__restrict__ posq, unsigned int startTileIndex, unsigned int numTileIndices,
const float2 *__restrict__ radiusEpsilon) {
unsigned int totalWarps = (blockDim.x * gridDim.x) / TILE_SIZE;
unsigned int warp = (blockIdx.x * blockDim.x + threadIdx.x) / TILE_SIZE;
const unsigned int numTiles = numTileIndices;
unsigned int pos = (unsigned int) (startTileIndex + warp * (long long) numTiles / totalWarps);
unsigned int end = (unsigned int) (startTileIndex + (warp + 1) * (long long) numTiles / totalWarps);
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
do {
// Extract the coordinates of this tile
const unsigned int tgx = threadIdx.x & (TILE_SIZE - 1);
const unsigned int tbx = threadIdx.x - tgx;
const unsigned int localGroupIndex = threadIdx.x / TILE_SIZE;
int x, y;
AtomData data;
if (pos < end) {
y = (int) floor(NUM_BLOCKS + 0.5f - SQRT((NUM_BLOCKS + 0.5f) * (NUM_BLOCKS + 0.5f) - 2 * pos));
x = (pos - y * NUM_BLOCKS + y * (y + 1) / 2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos - y * NUM_BLOCKS + y * (y + 1) / 2);
}
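// Clarifying note (added): pos enumerates the upper-triangular tiles as
// pos = y*NUM_BLOCKS - y*(y+1)/2 + x with x >= y; the formula above inverts
// this mapping, and the check corrects occasional round-off in the sqrt.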
unsigned int atom1 = x * TILE_SIZE + tgx;
loadAtomData(data, atom1, posq, radiusEpsilon);
loadAtomData(localData[threadIdx.x], y * TILE_SIZE + tgx, posq, radiusEpsilon);
real emixo, emixh, rmixo, rmixh;
initParticleParameters(data.radius, data.epsilon, rmixo, rmixh, emixo, emixh);
data.force = make_real3(0);
localData[threadIdx.x].force = make_real3(0);
// Compute forces.
unsigned int tj = tgx;
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = y * TILE_SIZE + tj;
if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
real3 tempForce;
real tempEnergy;
computeOneInteraction(data, localData[tbx + tj], rmixo, rmixh, emixo, emixh, tempForce, tempEnergy);
data.force += tempForce;
localData[tbx + tj].force -= tempForce;
energy += (x == y ? 0.5f * tempEnergy : tempEnergy);
real emjxo, emjxh, rmjxo, rmjxh;
initParticleParameters(localData[tbx + tj].radius, localData[tbx + tj].epsilon, rmjxo, rmjxh, emjxo,
emjxh);
computeOneInteraction(localData[tbx + tj], data, rmjxo, rmjxh, emjxo, emjxh, tempForce, tempEnergy);
data.force -= tempForce;
localData[tbx + tj].force += tempForce;
energy += (x == y ? 0.5f * tempEnergy : tempEnergy);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
unsigned int offset = x * TILE_SIZE + tgx;
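// Note (added): forces are accumulated in 64-bit fixed point; each component is
// scaled by 2^32 (0x100000000) and added with atomicAdd on unsigned long long,
// which makes the accumulation independent of thread ordering.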
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x * 0x100000000)));
atomicAdd(&forceBuffers[offset + PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (data.force.y * 0x100000000)));
atomicAdd(&forceBuffers[offset + 2 * PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (data.force.z * 0x100000000)));
if (x != y) {
offset = y * TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset],
static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x * 0x100000000)));
atomicAdd(&forceBuffers[offset + PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y * 0x100000000)));
atomicAdd(&forceBuffers[offset + 2 * PADDED_NUM_ATOMS],
static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z * 0x100000000)));
}
}
pos++;
} while (pos < end);
energyBuffer[blockIdx.x * blockDim.x + threadIdx.x] -= AWATER * energy;
}
|
a858f6a8845544414fd2d992fc8740a311414ec5.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <TH/THHalf.h>
#include <THH/THHTensorCopy.h>
#include <THH/THHApply.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensor.hpp>
template <typename T>
struct TensorMulConstantOp {
TensorMulConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(T* v) {
*v *= val;
}
const T val;
};
#include <THH/generic/THHTensorMathPairwise.hip>
#include <THH/THHGenerateBoolType.h>
| a858f6a8845544414fd2d992fc8740a311414ec5.cu | #include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <TH/THHalf.h>
#include <THC/THCTensorCopy.h>
#include <THC/THCApply.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensor.hpp>
template <typename T>
struct TensorMulConstantOp {
TensorMulConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(T* v) {
*v *= val;
}
const T val;
};
#include <THC/generic/THCTensorMathPairwise.cu>
#include <THC/THCGenerateBoolType.h>
|
ac552308a71118975641f93f127ce4c3c02d6038.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void convtranspose_kernel(){
// extern __shared__ float shmem[];
// float* shared_X = &shmem[];
// float* shared_W = &shmem[output_size * output_size];
// int batch, out ;
// batch = blockIdx.x;
// out = blockIdx.y;
// int h_out, w_out;
// h_out = threadIdx.x;
// w_out = threadIdx.y;
}
void launch_convtranspose_general(){
// //each threads correspond to one element
// dim3 blockSize(output_size,output_size,1);
// dim3 gridSize(batch_size,out_channels,1);
// size_t shmem_size = sizeof(float) * (output_size * output_size + kernel_size);
}
__global__ void convtranspose_kernel_1(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size){
// X: [1, 128, 7, 7]
// batch x in_channel x feature_size x feature_size
// W: [128, 64, 4, 4]
// in_channel x out_channels x kernel_size x kernel_size
// Y: [1, 64, 14, 14]
// batch x out_channels x (2*feature_size) x (2*feature_size)
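// Clarifying note (added, not in the original code): with stride 2 the input is
// effectively dilated before the convolution: input pixel (h, w) is placed at
// shared_X[2*h + 1][2*w + 1] below, the remaining entries stay zero, and the
// flipped 4x4 kernel is slid over this zero-interleaved grid, so a 7x7 input
// yields a 14x14 output.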
__shared__ float shared_X[31][31];
__shared__ float shared_W[4][4];
int batch, out ;
batch = blockIdx.x;
out = blockIdx.y;
int h_out, w_out;
h_out = threadIdx.x;
w_out = threadIdx.y;
float sum = 0.;
int X_idx, W_idx, Y_idx;
for (int in = 0; in < in_channels; in++){
// load W to shared memory
// unlike conv, the kernel is loaded in reverse (flipped) order
if (h_out < kernel_size && w_out < kernel_size){
W_idx = in * out_channels * kernel_size * kernel_size +
out * kernel_size * kernel_size +
h_out * kernel_size + w_out;
shared_W[kernel_size - h_out -1][kernel_size - w_out -1] = W[W_idx];
}
__syncthreads();
//zero init of shared X
shared_X[h_out][w_out] =0;
// load X to shared memory
// extend mapping
if (h_out < feature_size && w_out < feature_size){
X_idx = batch * in_channels * feature_size *feature_size + \
in * feature_size * feature_size + \
h_out * feature_size + w_out;
shared_X[2 * h_out + 1][2 *w_out + 1] = X[X_idx];
}
__syncthreads();
for (int p = 0; p < kernel_size; p++)
{
for (int q = 0; q < kernel_size; q++)
{
// boundary check (possibly problematic)
int h_idx = h_out - 1 + p;
int w_idx = w_out - 1 + q;
if (h_idx >= 0 && h_idx < feature_size * 2 &&
w_idx >= 0 && w_idx < feature_size * 2)
{
sum += shared_X[h_idx][w_idx] * shared_W[p][q];
}
}
}
__syncthreads();
}
Y_idx = batch * out_channels * feature_size * feature_size * 4 +
out * feature_size * feature_size * 4 +
h_out * feature_size * 2 + w_out;
Y[Y_idx] = sum;
}
void launch_convtranspose_1(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size,
int stride){
int output_size = feature_size * stride;
//each threads correspond to one element
dim3 blockSize(output_size,output_size,1);
dim3 gridSize(batch_size,out_channels,1);
hipLaunchKernelGGL(( convtranspose_kernel_1), dim3(gridSize), dim3(blockSize), 0, 0, Y,
X,
W,
in_channels,
out_channels,
kernel_size,
feature_size,
batch_size
);
}
void launch_convtranspose_2(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size){
}
| ac552308a71118975641f93f127ce4c3c02d6038.cu | __global__ void convtranspose_kernel(){
// extern __shared__ float shmem[];
// float* shared_X = &shmem[];
// float* shared_W = &shmem[output_size * output_size];
// int batch, out ;
// batch = blockIdx.x;
// out = blockIdx.y;
// int h_out, w_out;
// h_out = threadIdx.x;
// w_out = threadIdx.y;
}
void launch_convtranspose_general(){
// //each threads correspond to one element
// dim3 blockSize(output_size,output_size,1);
// dim3 gridSize(batch_size,out_channels,1);
// size_t shmem_size = sizeof(float) * (output_size * output_size + kernel_size);
}
__global__ void convtranspose_kernel_1(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size){
// X: [1, 128, 7, 7]
// batch x in_channel x feature_size x feature_size
// W: [128, 64, 4, 4]
// in_channel x out_channels x kernel_size x kernel_size
// Y: [1, 64, 14, 14]
// batch x out_channels x (2*feature_size) x (2*feature_size)
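// Clarifying note (added, not in the original code): with stride 2 the input is
// effectively dilated before the convolution: input pixel (h, w) is placed at
// shared_X[2*h + 1][2*w + 1] below, the remaining entries stay zero, and the
// flipped 4x4 kernel is slid over this zero-interleaved grid, so a 7x7 input
// yields a 14x14 output.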
__shared__ float shared_X[31][31];
__shared__ float shared_W[4][4];
int batch, out ;
batch = blockIdx.x;
out = blockIdx.y;
int h_out, w_out;
h_out = threadIdx.x;
w_out = threadIdx.y;
float sum = 0.;
int X_idx, W_idx, Y_idx;
for (int in = 0; in < in_channels; in++){
// load W to shared memory
// unlike conv, the kernel is loaded in reverse (flipped) order
if (h_out < kernel_size && w_out < kernel_size){
W_idx = in * out_channels * kernel_size * kernel_size +
out * kernel_size * kernel_size +
h_out * kernel_size + w_out;
shared_W[kernel_size - h_out -1][kernel_size - w_out -1] = W[W_idx];
}
__syncthreads();
//zero init of shared X
shared_X[h_out][w_out] =0;
// load X to shared memory
// extend mapping
if (h_out < feature_size && w_out < feature_size){
X_idx = batch * in_channels * feature_size *feature_size + \
in * feature_size * feature_size + \
h_out * feature_size + w_out;
shared_X[2 * h_out + 1][2 *w_out + 1] = X[X_idx];
}
__syncthreads();
for (int p = 0; p < kernel_size; p++)
{
for (int q = 0; q < kernel_size; q++)
{
// boundary check (possibly problematic)
int h_idx = h_out - 1 + p;
int w_idx = w_out - 1 + q;
if (h_idx >= 0 && h_idx < feature_size * 2 &&
w_idx >= 0 && w_idx < feature_size * 2)
{
sum += shared_X[h_idx][w_idx] * shared_W[p][q];
}
}
}
__syncthreads();
}
Y_idx = batch * out_channels * feature_size * feature_size * 4 +
out * feature_size * feature_size * 4 +
h_out * feature_size * 2 + w_out;
Y[Y_idx] = sum;
}
void launch_convtranspose_1(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size,
int stride){
int output_size = feature_size * stride;
//each threads correspond to one element
dim3 blockSize(output_size,output_size,1);
dim3 gridSize(batch_size,out_channels,1);
convtranspose_kernel_1<<<gridSize, blockSize>>>(Y,
X,
W,
in_channels,
out_channels,
kernel_size,
feature_size,
batch_size
);
}
void launch_convtranspose_2(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size){
}
|
0891aaa6da000972f61d7d97ee572ae7da55a69c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* The MIT License (MIT)
* Copyright (c) 2021, NVIDIA CORPORATION.
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
#define CUB_NS_PREFIX namespace kaolin {
#define CUB_NS_POSTFIX }
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <vector>
#include <iostream>
#include <helper_cuda.h>
#include <helper_math.h>
#include <hip/hip_vector_types.h>
#include <GL/glew.h>
#include <torch/torch.h>
#include <spc/SPC.h>
#include "SDF.h"
#include "nvmath.h"
#include <solr/util_timer.cuh>
#define CUB_STDERR
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#define CUDA_PRINT_ERROR() //cudaPrintError(__FILE__, __LINE__)
// #define DEBUG
#define STRINGIFY2(X) #X
#define STRINGIFY(X) STRINGIFY2(X)
#ifdef DEBUG
# define TIMER PerfTimer timer = PerfTimer()
# define TIMER_CHECK(x) timer.check(x)
# define DEBUG_PRINT(x) std::cout << STRINGIFY(x) ":" << x << std::endl
#else
# define TIMER
# define TIMER_CHECK(x)
# define DEBUG_PRINT(x)
#endif
// These are for hot testing
# define PROBE PerfTimer probe_timer = PerfTimer()
# define PROBE_CHECK(x) probe_timer.check(x)
using namespace solr;
namespace I = torch::indexing;
GLuint pbo; // OpenGL pixel buffer object
struct cudaGraphicsResource* cuda_pbo_resource = NULL; // CUDA Graphics Resource (to transfer PBO)
uint* d_output = NULL;
float g_milliseconds;
uint g_TargetLevel = 0;
uint g_Renderer = 0;
extern inline void cudaPrintError(const char* file, const int line);
torch::Tensor d_ray_o;
torch::Tensor d_ray_d;
torch::Tensor d_x;
torch::Tensor d_t;
torch::Tensor d_normal;
Nugget* d_Nuggets[2];
uint* d_Info;
uint* d_InfoA;
uint* d_PrefixSum;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
torch::Tensor tInfo;
__device__ __inline__ uchar4 to_uchar4(float4 vec)
{
return make_uchar4((uchar)vec.x, (uchar)vec.y, (uchar)vec.z, (uchar)vec.w);
}
__global__ void
d_MarkUniqueRays(uint num, Nugget* nuggets, uint* info)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
if (tidx == 0)
info[tidx] = 1;
else
info[tidx] = nuggets[tidx - 1].x == nuggets[tidx].x ? 0 : 1;
}
}
__global__ void
d_renderHit(uint num, float* ray_o, float* ray_d, bool* hit, uchar4 *d_output)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
float4 color = make_float4(
float(hit[tidx]),
float(hit[tidx]),
float(hit[tidx]),
1.0);
d_output[tidx] = to_uchar4(255.0 * color);
}
}
__global__ void
d_renderDepth(uint num, float* ray_o, float* ray_d, float* depth, uchar4 *d_output)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
float4 color = make_float4(
clamp(depth[tidx] * 0.25f, 0.0f, 1.0f),
clamp(depth[tidx] * 0.25f, 0.0f, 1.0f),
clamp(depth[tidx] * 0.25f, 0.0f, 1.0f),
1.0);
d_output[tidx] = to_uchar4(255.0 * color);
}
}
__global__ void
d_renderNormal(uint num, float* ray_o, float* ray_d, float* normal, uchar4 *d_output)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
float4 color = make_float4(
(normal[tidx*3 ]+1) * 0.5,
(normal[tidx*3+1]+1) * 0.5,
(normal[tidx*3+2]+1) * 0.5,
1.0);
if (color.x + color.y + color. z < 2.9) {
d_output[tidx] = to_uchar4(255.0 * color);
}
}
}
extern "C"
uint RenderImage(
uchar4 *d_output,
uint imageW,
uint imageH,
torch::Tensor Org,
torch::Tensor Dir,
torch::Tensor Nuggets,
SPC* spc,
SDF* sdf)
{
CUDA_PRINT_ERROR();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
uint num_rays = imageW*imageH;
CUDA_PRINT_ERROR();
// map PBO to get CUDA device pointer
hipGraphicsMapResources(1, &cuda_pbo_resource, 0);
size_t num_bytes;
hipGraphicsResourceGetMappedPointer((void **)&d_output, &num_bytes, cuda_pbo_resource);
hipMemset(d_output, g_Renderer!=0?0:-1, num_rays * sizeof(color_type)); //clear image buffer
CUDA_PRINT_ERROR();
TIMER;
torch::Tensor Info = torch::zeros({SCAN_MAX_VOXELS}, torch::device(torch::kCUDA).dtype(torch::kInt32));
int num_nuggets = Nuggets.size(0);
d_MarkUniqueRays << <(num_nuggets + 1023) / 1024, 1024 >> > (
num_nuggets,
reinterpret_cast<Nugget*>(Nuggets.data_ptr<int>()),
reinterpret_cast<uint*>(Info.data_ptr<int>()));
TIMER_CHECK("Postprocess SPC ");
CUDA_PRINT_ERROR();
TIMER_CHECK("generate rays ");
int lod = ::max(0, (int) g_TargetLevel - 2);
torch::Tensor Points = spc->GetPoints(g_TargetLevel).index({I::Slice(), torch::tensor({0,1,2})});
auto out = sdf->sphereTrace(Org, Dir, Nuggets, Points, Info, lod);
CUDA_PRINT_ERROR();
TIMER_CHECK("st ");
switch (g_Renderer)
{
case 0:
d_renderNormal << <(num_rays + 1023) / 1024, 1024 >> >(
num_rays, Org.data_ptr<float>(), Dir.data_ptr<float>(), out[3].data_ptr<float>(), d_output);
break;
case 1:
d_renderDepth << <(num_rays + 1023) / 1024, 1024 >> >(
num_rays, Org.data_ptr<float>(), Dir.data_ptr<float>(), out[1].data_ptr<float>(), d_output);
break;
case 2:
d_renderHit << <(num_rays + 1023) / 1024, 1024 >> >(
num_rays, Org.data_ptr<float>(), Dir.data_ptr<float>(), out[2].data_ptr<bool>(), d_output);
break;
}
TIMER_CHECK("write buffer ");
CUDA_PRINT_ERROR();
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_pbo_resource, 0));
hipEventRecord(stop);
hipEventSynchronize(stop);
g_milliseconds = 0;
hipEventElapsedTime(&g_milliseconds, start, stop);
CUDA_PRINT_ERROR();
return num_rays;
}
| 0891aaa6da000972f61d7d97ee572ae7da55a69c.cu | /******************************************************************************
* The MIT License (MIT)
* Copyright (c) 2021, NVIDIA CORPORATION.
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
#define CUB_NS_PREFIX namespace kaolin {
#define CUB_NS_POSTFIX }
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <vector>
#include <iostream>
#include <helper_cuda.h>
#include <helper_math.h>
#include <vector_types.h>
#include <GL/glew.h>
#include <torch/torch.h>
#include <spc/SPC.h>
#include "SDF.h"
#include "nvmath.h"
#include <solr/util_timer.cuh>
#define CUB_STDERR
#include <cub/util_allocator.cuh>
#include <cub/device/device_scan.cuh>
#define CUDA_PRINT_ERROR() //cudaPrintError(__FILE__, __LINE__)
// #define DEBUG
#define STRINGIFY2(X) #X
#define STRINGIFY(X) STRINGIFY2(X)
#ifdef DEBUG
# define TIMER PerfTimer timer = PerfTimer()
# define TIMER_CHECK(x) timer.check(x)
# define DEBUG_PRINT(x) std::cout << STRINGIFY(x) ":" << x << std::endl
#else
# define TIMER
# define TIMER_CHECK(x)
# define DEBUG_PRINT(x)
#endif
// These are for hot testing
# define PROBE PerfTimer probe_timer = PerfTimer()
# define PROBE_CHECK(x) probe_timer.check(x)
using namespace solr;
namespace I = torch::indexing;
GLuint pbo; // OpenGL pixel buffer object
struct cudaGraphicsResource* cuda_pbo_resource = NULL; // CUDA Graphics Resource (to transfer PBO)
uint* d_output = NULL;
float g_milliseconds;
uint g_TargetLevel = 0;
uint g_Renderer = 0;
extern inline void cudaPrintError(const char* file, const int line);
torch::Tensor d_ray_o;
torch::Tensor d_ray_d;
torch::Tensor d_x;
torch::Tensor d_t;
torch::Tensor d_normal;
Nugget* d_Nuggets[2];
uint* d_Info;
uint* d_InfoA;
uint* d_PrefixSum;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
torch::Tensor tInfo;
__device__ __inline__ uchar4 to_uchar4(float4 vec)
{
return make_uchar4((uchar)vec.x, (uchar)vec.y, (uchar)vec.z, (uchar)vec.w);
}
__global__ void
d_MarkUniqueRays(uint num, Nugget* nuggets, uint* info)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
if (tidx == 0)
info[tidx] = 1;
else
info[tidx] = nuggets[tidx - 1].x == nuggets[tidx].x ? 0 : 1;
}
}
__global__ void
d_renderHit(uint num, float* ray_o, float* ray_d, bool* hit, uchar4 *d_output)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
float4 color = make_float4(
float(hit[tidx]),
float(hit[tidx]),
float(hit[tidx]),
1.0);
d_output[tidx] = to_uchar4(255.0 * color);
}
}
__global__ void
d_renderDepth(uint num, float* ray_o, float* ray_d, float* depth, uchar4 *d_output)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
float4 color = make_float4(
clamp(depth[tidx] * 0.25f, 0.0f, 1.0f),
clamp(depth[tidx] * 0.25f, 0.0f, 1.0f),
clamp(depth[tidx] * 0.25f, 0.0f, 1.0f),
1.0);
d_output[tidx] = to_uchar4(255.0 * color);
}
}
__global__ void
d_renderNormal(uint num, float* ray_o, float* ray_d, float* normal, uchar4 *d_output)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num)
{
float4 color = make_float4(
(normal[tidx*3 ]+1) * 0.5,
(normal[tidx*3+1]+1) * 0.5,
(normal[tidx*3+2]+1) * 0.5,
1.0);
if (color.x + color.y + color. z < 2.9) {
d_output[tidx] = to_uchar4(255.0 * color);
}
}
}
extern "C"
uint RenderImage(
uchar4 *d_output,
uint imageW,
uint imageH,
torch::Tensor Org,
torch::Tensor Dir,
torch::Tensor Nuggets,
SPC* spc,
SDF* sdf)
{
CUDA_PRINT_ERROR();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
uint num_rays = imageW*imageH;
CUDA_PRINT_ERROR();
// map PBO to get CUDA device pointer
cudaGraphicsMapResources(1, &cuda_pbo_resource, 0);
size_t num_bytes;
cudaGraphicsResourceGetMappedPointer((void **)&d_output, &num_bytes, cuda_pbo_resource);
cudaMemset(d_output, g_Renderer!=0?0:-1, num_rays * sizeof(color_type)); //clear image buffer
CUDA_PRINT_ERROR();
TIMER;
torch::Tensor Info = torch::zeros({SCAN_MAX_VOXELS}, torch::device(torch::kCUDA).dtype(torch::kInt32));
int num_nuggets = Nuggets.size(0);
d_MarkUniqueRays << <(num_nuggets + 1023) / 1024, 1024 >> > (
num_nuggets,
reinterpret_cast<Nugget*>(Nuggets.data_ptr<int>()),
reinterpret_cast<uint*>(Info.data_ptr<int>()));
TIMER_CHECK("Postprocess SPC ");
CUDA_PRINT_ERROR();
TIMER_CHECK("generate rays ");
int lod = std::max(0, (int) g_TargetLevel - 2);
torch::Tensor Points = spc->GetPoints(g_TargetLevel).index({I::Slice(), torch::tensor({0,1,2})});
auto out = sdf->sphereTrace(Org, Dir, Nuggets, Points, Info, lod);
CUDA_PRINT_ERROR();
TIMER_CHECK("st ");
switch (g_Renderer)
{
case 0:
d_renderNormal << <(num_rays + 1023) / 1024, 1024 >> >(
num_rays, Org.data_ptr<float>(), Dir.data_ptr<float>(), out[3].data_ptr<float>(), d_output);
break;
case 1:
d_renderDepth << <(num_rays + 1023) / 1024, 1024 >> >(
num_rays, Org.data_ptr<float>(), Dir.data_ptr<float>(), out[1].data_ptr<float>(), d_output);
break;
case 2:
d_renderHit << <(num_rays + 1023) / 1024, 1024 >> >(
num_rays, Org.data_ptr<float>(), Dir.data_ptr<float>(), out[2].data_ptr<bool>(), d_output);
break;
}
TIMER_CHECK("write buffer ");
CUDA_PRINT_ERROR();
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0));
cudaEventRecord(stop);
cudaEventSynchronize(stop);
g_milliseconds = 0;
cudaEventElapsedTime(&g_milliseconds, start, stop);
CUDA_PRINT_ERROR();
return num_rays;
}
|
1cde857f6ce472e820a1f25aa743145907523d56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file bnorm_gpu.cu
// @brief Batch normalization implementation (GPU)
// @author Sebastien Ehrhardt
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Sebastien Ehrhardt and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bnorm.hpp"
#include "../datacu.hpp"
#include "blashelper.hpp"
#include "sharedmem.cuh"
#include <assert.h>
#include <float.h>
#include <stdint.h>
// MSB_WARP = log2(WARP_SIZE)
#define WARP_SIZE 32
#define MSB_WARP 5
// macro function
#define min(a,b) (a > b ? b : a);
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
static inline int getBlockSize(int dataSize)
{
int blockSize = VL_CUDA_NUM_THREADS / 2 ;
if (dataSize < blockSize) {
unsigned int numWarps = dataSize / WARP_SIZE ;
if (numWarps < 4) {
blockSize = 2 * WARP_SIZE ;
}
else if (numWarps < 8) {
blockSize = 4 * WARP_SIZE ;
}
else {
blockSize = 8 * WARP_SIZE ;
}
}
return blockSize ;
}
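// Illustrative example (added; assumes VL_CUDA_NUM_THREADS == 1024): the default
// block size is then 512, and for a small plane of dataSize = 100 elements
// numWarps = 100/32 = 3 < 4, so a block of 2 * WARP_SIZE = 64 threads is chosen.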
// get the smallest multiple of factor that is greater than or equal to x
static inline int nextMultipleOf(int x, int factor)
{
return factor * ((x + factor - 1)/factor) ;
}
/*
# Reduction over the whole batch
`bnorm` works by accumulating statistics over planes (channels) and
images in a batch. It then uses these statistics to renormalize the values.
Summing efficiently over planes is a little complex on the GPU.
What we have are threads, block of threads, and a grid of blocks:
* Warps (up to 32 threads). Highly coupled, and in fact *coalesced* and
run essentially in a single stream of vector instructions on the GPU,
which also means that they stay synchronized implicitly.
* Blocks (up to 512 threads). Blocks are assigned to a SM, and the SM
breaks them down into warps for execution. Threads in the same block
can be synchronised explicitly using __syncthreads(). They all run
concurrently in the same SM.
* Grid. A grid is an array of blocks that are scheduled onto multiple SMs.
Threads in a grid can only be synchronised implicitly at the end of a kernel.
Given these constraints, we explain next how operations are mapped to the
blocks and the threads.
The input data is organised in SIZE images, each of which is composed of
DEPTH planes. The goal is to compute the mean and std deviation of each
plane (across images). In the following diagram, planes are enumerated
from left to right and top to bottom, first listing all the planes for
one image (a row) and then subsequent images (in different rows).
     +-------+   +-------+   +-------+   +-------+
     |plane 1|   |p 2    |   |p 3    |   |p 4    |      numPlanes = 12
     |ch 1   |   |c 2    |   |c 3    |   |c 4    |      depth = 4
     |image 1|   |i 1    |   |i 1    |   |i 1    |      planeArea = 28
 +---+block 1|   |b 2    |   |b 3    |   |b 4    |      planeStride = gridSize = 8
 |   +-------+   +-------+   +-------+   +-------+
 |
 |   +-------+   +-------+   +-------+   +-------+
 |   |p 5    |   |p 6    |   |p 7    |   |p 8    |
 |   |c 1    |   |c 2    |   |c 3    |   |c 4    |
 |   |i 2    |   |i 2    |   |i 2    |   |i 2    |
 |   |b 5    |   |b 6    |   |b 7    |   |b 8    |
 |   +-------+   +-------+   +-------+   +-------+
 |
 |   +-------+   +-------+   +-------+   +-------+
 |   |p 9    |   |p 10   |   |p 11   |   |p 12   |
 |   |c 1    |   |c 2    |   |c 3    |   |c 4    |
 |   |i 3    |   |i 3    |   |i 3    |   |i 3    |
 +-->+b 1    |   |b 2    |   |b 3    |   |b 4    |
     +-------+   +-------+   +-------+   +-------+
We create a certain number of thread blocks. Call this number gridSize.
Each block operates (sums) over a certain number of planes, with
subsequent blocks taking over subsequent planes.
Since there may be fewer blocks than planes overall, a single block
does more than one plane in general but skips over the ones that are
already processed by neighbouring blocks (in the example, thread block 1
integrates plane 1 and plane 9).
It is important to optimise how blocks access memory. This is organised
in three phases:
1. Blocks accumulate in a shared scratch space (of blockSize elements,
for each block) partial sums. In this manner, the scratch space of each block
contains the statistics for a particular plane (feature channels) and subset
of the images.
2. Blocks reduce the data in their scratch space using within-block reduction.
3. This is still a partial result, as blocks do not in general cover all the images.
A last pass accumulates the outputs from the individual blocks.
# Sliding-window accumulation
As blocks accumulate over different planes and images and these are not
necessarily aligned to nice memory boundaries, the problem is how to make
computations efficient.
The trick is to look at the block as a jumping window, sliding over the memory
that needs to be summed, but always aligned at good block boundaries. This means
that occasionally threads in a block will access some data that needs to be discarded.
   +-------+   +-------+   +-------+   +-------+      aligned blocks (with two warps each)
   |   :   |   |   :   |   |   :   |   |   :   |      covering the data
   +-------+   +-------+   +-------+   +-------+
       +-------------+     +-------------+            data to sum
   +-------------------------------------------------------->
                                       increasing memory addresses
As each block slides over the data, it accumulates partial results
in a scratch buffer which has a number of elements equal to the block size.
Eventually, block-level reduction is performed on this scratch buffer
to get the total.
# Per-block reduction
Use a block of blockSize threads to accumulate all the values in the
shared array mdata[], which has blockSize elements:
mdata[0] <- mdata[0] + mdata[1] + ... + mdata[blockSize-1]
blockSize is a power of two and less than the maximum allowed block
size (usually 512). mdata[] has to be padded with zeros to allow
summation over vectors whose dimension is less than blockSize.
This is done as follows:
1. First, the first half of the threads in the block accumulate
the second half of mdata in the first half:
tid=0: mdata[0] = mdata[0] + mdata[blockSize/2]
...
tid=blockSize/2-1: mdata[blockSize/2-1] = mdata[blockSize/2-1] + mdata[blockSize-1]
Note that half of the threads are idle
2. Then, the first quarter of the threads reduce the result further:
tid=0: mdata[0] = mdata[0] + mdata[blockSize/4]
...
tid=blockSize/4-1: mdata[blockSize/4-1] = mdata[blockSize/4-1] + mdata[blockSize/2-1]
3. This continues until only tid=0 operates:
tid=0: mdata[0] = mdata[0] + mdata[1]
This is further divided into two regimes. In the first regime, tid
may span threads in the same block but different warps. Here
the code must be explicitly synchronized.
In the second regime, tid < WARP_SIZE, and synchronization is not
required as threads are coalesced.
*/
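// Illustrative sketch (added, not part of the original source): the tree reduction
// described above, specialised to a block of 8 threads to make the scheme concrete.
// The name exampleBlockReduce8 is hypothetical and the function is not used below.
template<typename T>
__forceinline__ __device__ void exampleBlockReduce8(volatile T * mdata, unsigned int tid)
{
  // mdata[] holds 8 partial sums; afterwards mdata[0] holds their total.
  if (tid < 4) { mdata[tid] += mdata[tid + 4]; } // 8 -> 4 partial sums
  if (tid < 2) { mdata[tid] += mdata[tid + 2]; } // 4 -> 2
  if (tid < 1) { mdata[tid] += mdata[tid + 1]; } // 2 -> 1
  // No explicit synchronisation: 8 <= WARP_SIZE, so all participating threads
  // belong to the same warp (the second regime described above).
}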
template<typename T>
__forceinline__ __device__ void blockReduce(volatile T * mdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
// todo: get rid of maxDataSize?
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { mdata[tid] += mdata[tid + 512]; } __syncthreads(); } // mdata[0:511] = mdata[0:511] + mdata[512:1023]
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { mdata[tid] += mdata[tid + 256]; } __syncthreads(); } // mdata[0:255] = mdata[0:255] + mdata[256:511]
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { mdata[tid] += mdata[tid + 128]; } __syncthreads(); } // mdata[0:127] = mdata[0:127] + mdata[128:255]
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64 ) { if (tid < 64) { mdata[tid] += mdata[tid + 64]; } __syncthreads(); } // mdata[0:63] = mdata[0:63] + mdata[64:127]
if (tid < 32) {
// now enter warp
if (blockSize >= 64) { mdata[tid] += mdata[tid + 32]; } // mdata[0:31] = mdata[0:31] + mdata[32:63]
if (blockSize >= 32) { mdata[tid] += mdata[tid + 16]; } // mdata[0:15] = mdata[0:15] + mdata[16:31]
if (blockSize >= 16) { mdata[tid] += mdata[tid + 8]; } // mdata[0:7] = mdata[0:7] + mdata[7:15]
if (blockSize >= 8) { mdata[tid] += mdata[tid + 4]; } // mdata[0:3] = mdata[0:3] + mdata[4:7]
if (blockSize >= 4) { mdata[tid] += mdata[tid + 2]; } // mdata[0:1] = mdata[0:1] + mdata[2:3]
if (blockSize >= 2) { mdata[tid] += mdata[tid + 1]; } // mdata[0] = mdata[0] + mdata[1]
}
}
template<typename T>
__forceinline__ __device__ void blockReduce2(volatile T * mdata,
volatile T * sdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; } __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; }
}
}
template<typename T>
__forceinline__ __device__ void blockReduce4(volatile T * sdata,
volatile T * mdata,
volatile T * rdata,
volatile T * tdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >= 512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; rdata[tid] += rdata[tid + 512]; tdata[tid] += tdata[tid + 512];} __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >= 256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; rdata[tid] += rdata[tid + 256]; tdata[tid] += tdata[tid + 256];} __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >= 128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; rdata[tid] += rdata[tid + 128]; tdata[tid] += tdata[tid + 128];} __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >= 64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; rdata[tid] += rdata[tid + 64]; tdata[tid] += tdata[tid + 64];} __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; rdata[tid] += rdata[tid + 32]; tdata[tid] += tdata[tid + 32];}
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; rdata[tid] += rdata[tid + 16]; tdata[tid] += tdata[tid + 16];}
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; rdata[tid] += rdata[tid + 8]; tdata[tid] += tdata[tid + 8];}
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; rdata[tid] += rdata[tid + 4]; tdata[tid] += tdata[tid + 4];}
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; rdata[tid] += rdata[tid + 2]; tdata[tid] += tdata[tid + 2];}
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; rdata[tid] += rdata[tid + 1]; tdata[tid] += tdata[tid + 1];}
}
}
// Get largest memory address that is aligned to a warp worth of T
// and smaller than x.
template<typename T>
__forceinline__ __device__ uintptr_t getBlockBeginning(void const * x)
{
return (uintptr_t)(x) & (~((uintptr_t)(WARP_SIZE*sizeof(T)) - 1)) ;
}
// Use the current block of thread to sum over a given column of a matrix. The selected
// column is given by the thread block index in the block grid.
//
// This function uses an amount of scratch memory equal to blockSize*sizeof(T)
// where blockSize=blockDim.x.
template<typename T>
__forceinline__ __device__ T matrixSumHelper(T const * matrix, int numRows)
{
// One thread block per column to sum
// Shared memory is per-block; it holds blockSize intermediate results
//extern __shared__ T scratch [] ;
SharedMemory<T> smem ;
T * scratch = smem.getPointer() ;
int tid = threadIdx.x ;
int column = blockIdx.x ;
int blockSize = blockDim.x ;
// Note that scratch is different for different blocks, hence
// matrix columns. Now fill scratch with partial sums, in a sliding-window
// manner.
scratch[tid] = 0 ;
T const * columnBegin = matrix + column * numRows ;
T const * columnEnd = columnBegin + numRows ;
T const * block = (T const*) getBlockBeginning<T>(columnBegin) + tid ;
while (block < columnEnd) {
if (block >= columnBegin) {
scratch[tid] += *block ;
}
block += blockSize ;
}
// Now scratch[] has blockSize partial sums for this column
// Finish by reducing and saving
blockReduce<T>(scratch, tid, blockSize, numRows) ;
return scratch[0] ;
}
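// Illustrative sketch (added, not from the original source): how a kernel built on
// matrixSumHelper() is expected to be launched -- one thread block per column and
// blockSize*sizeof(T) bytes of dynamic shared scratch memory. The names
// exampleColumnSums and exampleLaunchColumnSums are hypothetical and unused below.
template<typename T>
__global__ void exampleColumnSums(T * sums, T const * matrix, int numRows)
{
  // Each block reduces its own column; only thread 0 writes the result.
  T x = matrixSumHelper(matrix, numRows) ;
  if (threadIdx.x == 0) { sums[blockIdx.x] = x ; }
}
template<typename T>
static void exampleLaunchColumnSums(T * sums, T const * matrix, int numRows, int numColumns)
{
  int blockSize = getBlockSize(numRows) ;
  hipLaunchKernelGGL(( exampleColumnSums) , dim3(numColumns), dim3(blockSize), blockSize*sizeof(T), 0,
    sums, matrix, numRows) ;
}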
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* compute_moments */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// This kernel accumulates means and variances for the data.
// Each block of thread sums over one or more data planes, resulting
// in an array accumulator[] of dimension numChunks x 2*numChannels.
//
// If each thread block scans all the images, then numChunks = 1.
// However, for efficiency different thread blocks do different
// subset of images, resulting in numChunks partial results to be summed
// later by a second kernel.
//
// The first part accumulator[:,0:numChannels-1] stores the data for the mean
// and the second part accumulator[:,numChannels,2*numChannels-1] the data
// for the sigmas.
//
// This function uses the sliding-window summing technique described
// above. It requires
//
// 2*sizeof(T)*blockSize
//
// bytes of shared scratch memory to hold to hold partial sums for
// means and sigmas.
template<typename T>
__global__ void accumulate_moments_partial(T * accumulator,
T const * data,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s [] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
mdata[tid] += x ;
sdata[tid] += x * x ;
}
block += blockSize ;
}
plane += planeStride ;
}
blockReduce2<T>(sdata, mdata, tid, blockSize, planeArea) ;
if (tid == 0) {
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
accumulator[i] = mdata[0];
accumulator[i + gridDim.x] = sdata[0];
}
}
// This kernel sums over the accumulator computed by the function
// above to obtain the moments.
//
// This kernel uses matrixSumHelper() defined above. Hence:
//
// 1. The block grid must be set to have a block
// for each column of accumulator[]. There are here 2*numChannels columns.
//
// 2. There can be any (reasonable) blockSize. Blocks will iterate
// over rows as needed to compute the sum.
//
// 3. It must be called with `blockSize*sizeof(T)` shared
// scratch space.
template<typename T>
__global__ void accumulate_moments_finish(T * moments,
T const * accumulator,
int numRows)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numRows) ;
if (tid == 0) {
moments[column] = x ;
}
}
// After accumulation, we need to renormalize the moments.
//
// 1. It should be called with enough threads to cover all
// numChannels in the moments.
//
// 2. The actual number of blocks is determined based on the block
// size to satisfy condition (2).
template<typename T>
__global__ void normalize_moments(T * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
int unsigned i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < numChannels){
// max(0, __) is for numerical issues
T mean = moments[i] / mass ;
T sigma2 = max((T).0, moments[i + numChannels]/mass - mean*mean) ;
moments[i] = mean ;
moments[i + numChannels] = sqrt(sigma2 + epsilon);
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* compute_ders */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// Same as accumulate_moments above. Call with:
//
// 1. 2*sizeof(T)*blockSize scratch space
// 2.
//
// bytes of shared scratch memory to hold to hold partial sums for
// means and sigmas.
//
// Below, either accumulator is not NULL and derMultipliers, derBiases,
// and moments are, or the function is run in a `final' mode,
// with accumulator set to NULL, and the other pointers set to their
// `final' destination.
template<typename T>
__global__ void accumulate_ders_partial
(T * accumulator,
T * derMultipliers,
T * derBiases,
T const * data,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s[] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
T dy = *dblock ;
mdata[tid] += x * dy ;
sdata[tid] += dy ;
}
block += blockSize ;
dblock += blockSize ;
}
plane += planeStride ;
}
blockReduce2<T>(sdata, mdata, tid, blockSize, planeArea);
if (tid == 0) {
if (numChannels == gridDim.x) {
// Final output ready
derMultipliers[blockIdx.x] = mdata[0];
derBiases[blockIdx.x] = sdata[0];
} else {
// Partially accumulated output
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
accumulator[i] = mdata[0]; // derMultipliers
accumulator[i + gridDim.x] = sdata[0]; // derBiases
}
}
}
template<typename T>
__global__ void accumulate_ders_finish(T * derMultipliers,
T * derBiases,
T const * accumulator,
int numChunks,
int numChannels)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numChunks) ;
if (tid == 0) {
// Recall that the matrix stores in order [derMultipliers derBiases means sigmas]
// containing four types of data
int type = column / numChannels ;
int channel = column % numChannels ;
if (type == 0) {
derMultipliers[channel] = x ;
}
else {
derBiases[channel] = x ;
}
}
}
template<typename T>
__global__ void normalize_ders(T * derMultipliers,
T const * derBiases,
T const * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < numChannels){
T mean = moments[idx] ;
T sigma = moments[idx + numChannels] ;
derMultipliers[idx] = (derMultipliers[idx] - mean*derBiases[idx]) / sigma ;
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* compute_ders_and_moments */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// Same as accumulate_moments above. Call with:
//
// 1. 4*sizeof(T)*blockSize scratch space
// 2.
//
// bytes of shared scratch memory to hold to hold partial sums for
// means and sigmas.
//
// Below, either accumulator is not NULL and derMultipliers, derBiases,
// and moments are, or the function is run in a `final' mode,
// with accumulator set to NULL, and the other pointers set to their
// `final' destination.
template<typename T>
__global__ void accumulate_ders_and_moments_partial
(T * accumulator,
T * derMultipliers,
T * derBiases,
T * moments,
T const * data,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s[] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
T * rdata = sdata + blockSize ;
T * tdata = rdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
rdata[tid] = 0 ;
tdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
T dy = *dblock ;
mdata[tid] += x * dy ;
sdata[tid] += dy ;
rdata[tid] += x * x ;
tdata[tid] += x ;
}
block += blockSize ;
dblock += blockSize ;
}
plane += planeStride ;
}
blockReduce4<T>(sdata, mdata, rdata, tdata, tid, blockSize, planeArea);
if (tid == 0) {
if (numChannels == gridDim.x) {
// Final output ready
derMultipliers[blockIdx.x] = mdata[0];
derBiases[blockIdx.x] = sdata[0];
moments[blockIdx.x] = tdata[0];
moments[blockIdx.x+numChannels] = rdata[0];
} else {
// Partially accumulated output
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
accumulator[i] = mdata[0]; // derMultipliers
accumulator[i + gridDim.x] = sdata[0]; // derBiases
accumulator[i + 2*gridDim.x] = tdata[0]; // means
accumulator[i + 3*gridDim.x] = rdata[0]; // sigmas
}
}
}
template<typename T>
__global__ void accumulate_ders_and_moments_finish(T * derMultipliers,
T * derBiases,
T * moments,
T const * accumulator,
int numChunks,
int numChannels)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numChunks) ;
if (tid == 0) {
// Recall that the matrix stores in order [derMultipliers derBiases means sigmas]
// containing four types of data
int type = column / numChannels ;
int channel = column % numChannels ;
if (type == 0) {
derMultipliers[channel] = x ;
}
else if (type == 1) {
derBiases[channel] = x ;
}
else if (type == 2) {
moments[channel] = x ;
}
else {
moments[channel + numChannels] = x ;
}
}
}
template<typename T>
__global__ void normalize_ders_and_moments(T * derMultipliers,
T * derBiases,
T * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < numChannels){
T mean = moments[idx] / mass;
T sigma2 = max((T).0, moments[idx + numChannels]/mass - mean*mean) ;
T sigma = sqrt(sigma2 + epsilon);
moments[idx] = mean ;
moments[idx + numChannels] = sigma ;
derMultipliers[idx] = (derMultipliers[idx]-mean*derBiases[idx]) / sigma ;
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* forward and backward */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// Call this kernel like compute_moments, but it does not need a scratch space
template<typename T>
__global__ void batch_normalize_forward(T * outputData,
T const * moments,
T const * data,
T const * multipliers,
T const * biases,
int planeArea,
int numPlanes,
int numChannels)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
// Not optimized for compute capability < 1.2
T mean = moments[channel];
T sigma = moments[channel+numChannels];
T multiplier = multipliers[channel];
T bias = biases[channel];
T coefficient = multiplier / sigma ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T * oblock = outputData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = coefficient * (*block - mean) + bias ;
}
block += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
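// Note added for clarity (not in the original source): when this kernel is invoked
// by the driver code, derBiases[c] holds sum(dy) over channel c and derMultipliers[c]
// has already been normalised to sum(dy .* (x - mu)) / sigma (see normalize_ders above).
// With g = multiplier, the kernel below therefore computes the standard batch
// normalization backward pass
//   dx = (g/sigma) * (dy - mean(dy)) - (g*derMultiplier/(mass*sigma^2)) * (x - mu).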
template<typename T>
__global__ void batch_normalize_backward(T * derData,
T const * moments,
T const * data,
T const * multipliers,
T const * derMultipliers,
T const * derBiases,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
T mass)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
// Not optimized for compute capability < 1.2
T mu = moments[channel];
T sigma = moments[channel + numChannels] ;
T multiplier = multipliers[channel] ;
T derMultiplier = derMultipliers[channel] ;
T muz = derBiases[channel] / mass;
T G1 = multiplier / sigma ;
T G2 = G1 * derMultiplier / (mass*sigma);
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T> (planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
T * oblock = derData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = G1 * (*dblock - muz) - G2 * (*block - mu);
}
block += blockSize ;
dblock += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* bnorm interface */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template<typename T>
struct bnorm<vl::VLDT_GPU, T>
{
/* ------------------------------------------------------------ */
/* forward */
/* ------------------------------------------------------------ */
static vl::ErrorCode
forward(Context& context,
T* output,
T* moments,
T const* data,
T const* multipliers,
T const* biases,
size_t height, size_t width, size_t depth, size_t size,
T epsilon)
{
hipError_t status ;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
// # Block size
//
// The block size is a multiple of the warp size, and generally
// as large as possible. However, we should avoid making the block
// size too much larger than the area of a plane. In fact,
// blocks process one plane at a time and would be required to discard
// a lot of work in this case.
unsigned int blockSize = getBlockSize(planeArea) ;
// Each channel is processed by one or more blocks.
// There are numChunks >= 1 blocks per channel, each working
// on a subset of one or more images. There are
//
// gridSize = numChunks * depth
//
// blocks in the grid.
//
// We select numChunks to satisfy the following constraints:
//
// 1. There must be at least one block per channel:
//
// numChunks >= 1
//
// 2. There must be at most one block per image:
//
// numChunks <= size
//
// 3. The grid size must be less than 65536 (CUDA limit)
//
// numChunks <= 65536 / depth
//
// Note that constraints (1) and (3) can be satisfied only if
// depth <= 65536. This is usually not a problem, but may fail
// in general.
//
// In general, (1--3) can be satisfied by setting numChunks=1.
// However, this is suboptimal if there are too many operations
// per block.
//
// We would like to do at most
//
// L = 10e3 * blockSize
//
// operations per block and each block does
//
// (planeArea * size)/numChunks
//
// operations. Thus the target value for numChunks is
//
// numChunks = ceil((planeArea * size) / L).
//
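// Worked example (added, with illustrative numbers): for planeArea = 56*56 = 3136,
// size = 64 and blockSize = 512, the code below gives L = 5,120,000 and
// numChunks = ceil(3136*64 / 5,120,000) = ceil(200,704 / 5,120,000) = 1,
// so a single chunk per channel already satisfies the target.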
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
if (numChunks > 1) {
// We need:
//
// * The `accumulator[]` buffer which has size (numChunks x 2*depth) = 2*gridSize
// elements to store the partial moments.
//
// * Potentially, space for moments[], which has size 2 x depth.
unsigned int accumulatorSize = 2 * nextMultipleOf(gridSize, WARP_SIZE) ;
unsigned int workspaceSize = accumulatorSize + (moments ? 0 : 2 * depth) ;
T * workspace = (T*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(T)) ;
T * accumulator = workspace;
if (moments == NULL) {
moments = workspace + accumulatorSize ;
}
// Accumulate partial moment summations
hipLaunchKernelGGL(( accumulate_moments_partial) , dim3(gridSize), dim3(blockSize), 2*blockSize*sizeof(T), 0,
accumulator,
data,
planeArea,
numPlanes,
depth,
numChunks) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
// Sum over the chunks (rows of accumulator[])
int blockSizeSum = getBlockSize(numChunks) ;
hipLaunchKernelGGL(( accumulate_moments_finish) , dim3(2*depth), dim3(blockSizeSum), blockSizeSum*sizeof(T), 0,
moments, accumulator, numChunks) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
} else {
if (moments == NULL) {
moments = (T*) context.getWorkspace(vl::VLDT_GPU, 2*depth * sizeof(T)) ;
}
hipLaunchKernelGGL(( accumulate_moments_partial) , dim3(gridSize), dim3(blockSize), 2*blockSize*sizeof(T), 0,
moments,
data,
planeArea,
numPlanes,
depth,
1) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
}
T mass = planeArea*size;
hipLaunchKernelGGL(( normalize_moments) , dim3(divideAndRoundUp(depth,blockSize)),dim3(blockSize), 0, 0,
moments, depth, mass, epsilon) ;
// Finally, normalize the data
hipLaunchKernelGGL(( batch_normalize_forward) , dim3(gridSize), dim3(blockSize), 0, 0,
output,
moments, data, multipliers, biases,
planeArea,
numPlanes,
depth) ;
status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
/* ------------------------------------------------------------ */
/* forward_given_moments */
/* ------------------------------------------------------------ */
static vl::ErrorCode
forward_given_moments(Context& context,
T* output,
T const* moments,
T const* data,
T const* multipliers,
T const* biases,
size_t height, size_t width, size_t depth, size_t size)
{
hipError_t status ;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
hipLaunchKernelGGL(( batch_normalize_forward) , dim3(gridSize), dim3(blockSize), 0, 0,
output,
moments, data, multipliers, biases,
planeArea,
numPlanes,
depth) ;
status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
/* ------------------------------------------------------------ */
/* backward */
/* ------------------------------------------------------------ */
static vl::ErrorCode
backward(Context& context,
T* derData,
T* derMultipliers,
T* derBiases,
T* moments,
T const* data,
T const* multipliers,
T const* biases,
T const* derOutput,
size_t height, size_t width, size_t depth, size_t size,
T epsilon)
{
hipError_t status = hipSuccess;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
if (numChunks > 1) {
unsigned int accumulatorSize = 4 * nextMultipleOf(gridSize, WARP_SIZE) ;
unsigned int workspaceSize = accumulatorSize + (moments ? 0 : 2 * depth) ;
T * workspace = (T*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(T)) ;
T * accumulator = workspace;
if (moments == NULL) {
moments = workspace + accumulatorSize ;
}
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
// Mean, variance, derMultipliers and derBiases computation
hipLaunchKernelGGL(( accumulate_ders_and_moments_partial<T>) , dim3(gridSize), dim3(blockSize), 4*blockSize*sizeof(T), 0,
accumulator,
NULL, NULL, NULL,
data,
derOutput,
planeArea,
numPlanes,
depth,
numChunks) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
// Sum over the chunks (rows of accumulator[])
int blockSizeSum = getBlockSize(numChunks) ;
hipLaunchKernelGGL(( accumulate_ders_and_moments_finish<T>) , dim3(4*depth), dim3(blockSizeSum), blockSizeSum*sizeof(T), 0,
derMultipliers, derBiases, moments, accumulator, numChunks, depth) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
} else {
if (moments == NULL) {
moments = (T*) context.getWorkspace(vl::VLDT_GPU, 2*depth * sizeof(T)) ;
}
hipLaunchKernelGGL(( accumulate_ders_and_moments_partial<T>) , dim3(gridSize), dim3(blockSize), 4*blockSize*sizeof(T), 0,
NULL,
derMultipliers, derBiases, moments,
data,
derOutput,
planeArea,
numPlanes,
depth,
1) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
}
T mass = planeArea*size;
hipLaunchKernelGGL(( normalize_ders_and_moments<T>) , dim3(divideAndRoundUp(depth,blockSize)),dim3(blockSize), 0, 0,
derMultipliers, derBiases, moments, depth, mass, epsilon) ;
// Compute output
hipLaunchKernelGGL(( batch_normalize_backward<T>) , dim3(gridSize), dim3(blockSize), 0, 0,
derData,
moments, data,
multipliers, derMultipliers, derBiases, derOutput,
planeArea, numPlanes, depth,
mass) ;
status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
/* ------------------------------------------------------------ */
/* backward_given_moments */
/* ------------------------------------------------------------ */
static vl::ErrorCode
backward_given_moments(Context& context,
T* derData,
T* derMultipliers,
T* derBiases,
T const* moments,
T const* data,
T const* multipliers,
T const* biases,
T const* derOutput,
size_t height, size_t width, size_t depth, size_t size,
T epsilon)
{
hipError_t status;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
if (numChunks > 1) {
unsigned int workspaceSize = 2 * nextMultipleOf(gridSize, WARP_SIZE) ;
T * accumulator = (T*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(T)) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
// Mean, variance, derMultipliers and derBiases computation
hipLaunchKernelGGL(( accumulate_ders_partial<T>) , dim3(gridSize), dim3(blockSize), 2*blockSize*sizeof(T), 0,
accumulator,
NULL, NULL,
data,
derOutput,
planeArea,
numPlanes,
depth,
numChunks) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
// Sum over the chunks (rows of accumulator[])
int blockSizeSum = getBlockSize(numChunks) ;
hipLaunchKernelGGL(( accumulate_ders_finish<T>) , dim3(2*depth), dim3(blockSizeSum), blockSizeSum*sizeof(T), 0,
derMultipliers, derBiases, accumulator, numChunks, depth) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
} else {
hipLaunchKernelGGL(( accumulate_ders_partial<T>) , dim3(gridSize), dim3(blockSize), 2*blockSize*sizeof(T), 0,
NULL,
derMultipliers, derBiases,
data,
derOutput,
planeArea,
numPlanes,
depth,
1) ;
status = hipPeekAtLastError() ;
if (status != hipSuccess) return vl::VLE_Cuda ;
}
T mass = planeArea*size;
hipLaunchKernelGGL(( normalize_ders<T>) , dim3(divideAndRoundUp(depth,blockSize)),dim3(blockSize), 0, 0,
derMultipliers, derBiases, moments, depth, mass, epsilon) ;
// Compute output
hipLaunchKernelGGL(( batch_normalize_backward<T>) , dim3(gridSize), dim3(blockSize), 0, 0,
derData,
moments, data,
multipliers, derMultipliers, derBiases, derOutput,
planeArea, numPlanes, depth,
mass) ;
status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // struct bnorm
} } // namespace vl::impl
template struct vl::impl::bnorm<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::bnorm<vl::VLDT_GPU, double> ;
#endif
| 1cde857f6ce472e820a1f25aa743145907523d56.cu | // @file bnorm_gpu.cu
// @brief Batch normalization implementation (GPU)
// @author Sebastien Ehrhardt
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Sebastien Ehrhardt and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bnorm.hpp"
#include "../datacu.hpp"
#include "blashelper.hpp"
#include "sharedmem.cuh"
#include <assert.h>
#include <float.h>
#include <stdint.h>
// MSB_WARP = log2(WARP_SIZE)
#define WARP_SIZE 32
#define MSB_WARP 5
// macro function
#define min(a,b) (a > b ? b : a);
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
static inline int getBlockSize(int dataSize)
{
int blockSize = VL_CUDA_NUM_THREADS / 2 ;
if (dataSize < blockSize) {
unsigned int numWarps = dataSize / WARP_SIZE ;
if (numWarps < 4) {
blockSize = 2 * WARP_SIZE ;
}
else if (numWarps < 8) {
blockSize = 4 * WARP_SIZE ;
}
else {
blockSize = 8 * WARP_SIZE ;
}
}
return blockSize ;
}
// get the smallest x which is a multiple of factor
static inline int nextMultipleOf(int x, int factor)
{
return factor * ((x + factor - 1)/factor) ;
}
/*
# Reduction over the whole batch
`bnorm` works by accumulating statistics over planes (channels) and
images in a batch. It then uses these statistics to renormalize the values.
Summing efficiently over planes is a little complex on the GPU.
What we have are threads, block of threads, and a grid of blocks:
* Warps (up to 32 threads). Highly coupled, and in fact *coalesced* and
run essentially in a single stream of vector instructions on the GPU,
which also means that they stay synchronized implicitly.
* Blocks (up to 512 threads). Blocks are assigned to an SM, and the SM
breaks them down into warps for execution. Threads in the same block
can be synchronised explicitly using __syncthreads(). They all run
concurrently in the same SM.
* Grid. A grid is an array of blocks that are scheduled onto multiple SMs.
Threads in a grid can only be synchronised implicitly at the end of a kernel.
Given these constraints, we explain next how operations are mapped to the
blocks and the threads.
The input data is organised in SIZE images, each of which is composed of
DEPTH planes. The goal is to compute the mean and std deviation of each
plane (across images). In the following diagram, planes are enumerated
from left to right and top to bottom, first listing all the planes for
one image (a row) and then subsequent images (in different rows).
+-------+ +-------+ +-------+ +-------+
|plane 1| |p 2 | |p 3 | |p 4 | numPlanes = 12
|ch 1 | |c 2 | |c 3 | |c 4 | depth = 4
|image 1| |i 1 | |i 1 | |i 1 | planeArea = 28
+---+block 1| |b 2 | |b 3 | |b 4 | planeStride = gridSize = 8
| +-------+ +-------+ +-------+ +-------+
|
| +-------+ +-------+ +-------+ +-------+
| |p 5 | |p 6 | |p 7 | |p 8 |
| |c 1 | |c 2 | |c 3 | |c 4 |
| |i 2 | |i 2 | |i 2 | |i 2 |
| |b 5 | |b 6 | |b 7 | |b 8 |
| +-------+ +-------+ +-------+ +-------+
|
| +-------+ +-------+ +-------+ +-------+
| |p 9 | |p 10 | |p 11 | |p 12 |
| |c 1 | |c 2 | |c 3 | |c 4 |
| |i 3 | |i 3 | |i 3 | |i 3 |
+-->+b 1 | |b 2 | |b 3 | |b 4 |
+-------+ +-------+ +-------+ +-------+
We create a certain number of thread blocks. Call this number gridSize.
Each block operates (sums) over a certain number of planes, with
subsequent blocks taking over subsequent planes.
Since there may be fewer blocks than planes overall, a single block
does more than one plane in general but skips over the ones that are
already processed by neighbour blocks. In the example, thread block 1
integrates plane 1 and plane 9.
It is important to optimise how blocks access memory. This is organised
in three phases:
1. Blocks accumulate in a shared scratch space (of blockSize elements,
for each block) partial sums. In this manner, the scratch space of each block
contains the statistics for a particular plane (feature channels) and subset
of the images.
2. Blocks reduce the data in their scratch space using within-block reduction.
3. This is still a partial result as, in general, blocks do not cover all the images.
A last pass accumulates the outputs from the individual blocks.
# Sliding-window accumulation
As blocks accumulate over different planes and images and these are not
necessarily aligned to nice memory boundaries, the problem is how to make
computations efficient.
The trick is to look at the block as a jumping window, sliding over the memory
that needs to be summed, but always aligned at good block boundaries. This means
that occasionally threads in a block will access some data that needs to be discarded.
+-------+ +-------+ +-------+ +-------+ aligned blocks (with two warps each)
| : | | : | | : | | : | covering the data
+-------+ +-------+ +-------+ +-------+
+-------------+ +-------------+ data to sum
+-------------------------------------------------------->
increasing memory addresses
As each block slides over the data, it accumulates partial results
in a scratch buffer which has a number of elements equal to the block size.
Eventually, block-level reduction is performed on this scratch buffer
to get the total.
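As a small illustrative example, take WARP_SIZE = 32 and a plane whose first
element lies at offset 40 from an aligned boundary. getBlockBeginning() rounds
the window start down to offset 32, so in the first window the threads mapped
to offsets 32--39 fall before planeBegin and are skipped by the
`block >= planeBegin` test, while every memory transaction stays aligned.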
# Per-block reduction
Use a block of blockSize threads to accumulate all the values in the
shared array mdata[], which has blockSize elements:
mdata[0] <- mdata[0] + mdata[1] + ... + mdata[blockSize-1]
blockSize is a power of two and less than the maximum allowed block
size (usually 512). mdata[] has to be padded with zeros to allow
summation over vectors whose dimension is less than blockSize.
This is done as follows:
1. First, the first half of the threads in the block accumulate
the second half of mdata in the first half:
tid=0: mdata[0] = mdata[0] + mdata[blockSize/2]
...
tid=blockSize/2-1: mdata[blockSize/2-1] = mdata[blockSize/2-1] + mdata[blockSize-1]
Note that half of the threads are idle
2. Then, the first quarter of the threads reduce the result further:
tid=0: mdata[0] = mdata[0] + mdata[blockSize/4]
...
tid=blockSize/4-1: mdata[blockSize/4-1] = mdata[blockSize/4-1] + mdata[blockSize/2-1]
3. This continues until only tid=0 operates:
tid=0: mdata[0] = mdata[0] + mdata[1]
This is further divided into two regimes. In the first regime, tid
may span threads in the same block but different warps. Here
the code must be explicitly synchronized.
In the second regime, tid < WARP_SIZE, and synchronization is not
required as threads are coalesced.
*/
template<typename T>
__forceinline__ __device__ void blockReduce(volatile T * mdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
// todo: get rid of maxDataSize?
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { mdata[tid] += mdata[tid + 512]; } __syncthreads(); } // mdata[0:511] = mdata[0:511] + mdata[512:1023]
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { mdata[tid] += mdata[tid + 256]; } __syncthreads(); } // mdata[0:255] = mdata[0:255] + mdata[256:511]
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { mdata[tid] += mdata[tid + 128]; } __syncthreads(); } // mdata[0:127] = mdata[0:127] + mdata[128:255]
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64 ) { if (tid < 64) { mdata[tid] += mdata[tid + 64]; } __syncthreads(); } // mdata[0:63] = mdata[0:63] + mdata[64:127]
if (tid < 32) {
// now enter warp
if (blockSize >= 64) { mdata[tid] += mdata[tid + 32]; } // mdata[0:31] = mdata[0:31] + mdata[32:63]
if (blockSize >= 32) { mdata[tid] += mdata[tid + 16]; } // mdata[0:15] = mdata[0:15] + mdata[16:31]
if (blockSize >= 16) { mdata[tid] += mdata[tid + 8]; } // mdata[0:7] = mdata[0:7] + mdata[7:15]
if (blockSize >= 8) { mdata[tid] += mdata[tid + 4]; } // mdata[0:3] = mdata[0:3] + mdata[4:7]
if (blockSize >= 4) { mdata[tid] += mdata[tid + 2]; } // mdata[0:1] = mdata[0:1] + mdata[2:3]
if (blockSize >= 2) { mdata[tid] += mdata[tid + 1]; } // mdata[0] = mdata[0] + mdata[1]
}
}
template<typename T>
__forceinline__ __device__ void blockReduce2(volatile T * mdata,
volatile T * sdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; } __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; }
}
}
template<typename T>
__forceinline__ __device__ void blockReduce4(volatile T * sdata,
volatile T * mdata,
volatile T * rdata,
volatile T * tdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >= 512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; rdata[tid] += rdata[tid + 512]; tdata[tid] += tdata[tid + 512];} __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >= 256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; rdata[tid] += rdata[tid + 256]; tdata[tid] += tdata[tid + 256];} __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >= 128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; rdata[tid] += rdata[tid + 128]; tdata[tid] += tdata[tid + 128];} __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >= 64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; rdata[tid] += rdata[tid + 64]; tdata[tid] += tdata[tid + 64];} __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; rdata[tid] += rdata[tid + 32]; tdata[tid] += tdata[tid + 32];}
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; rdata[tid] += rdata[tid + 16]; tdata[tid] += tdata[tid + 16];}
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; rdata[tid] += rdata[tid + 8]; tdata[tid] += tdata[tid + 8];}
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; rdata[tid] += rdata[tid + 4]; tdata[tid] += tdata[tid + 4];}
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; rdata[tid] += rdata[tid + 2]; tdata[tid] += tdata[tid + 2];}
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; rdata[tid] += rdata[tid + 1]; tdata[tid] += tdata[tid + 1];}
}
}
// Get largest memory address that is aligned to a warp worth of T
// and not greater than x.
template<typename T>
__forceinline__ __device__ uintptr_t getBlockBeginning(void const * x)
{
return (uintptr_t)(x) & (~((uintptr_t)(WARP_SIZE*sizeof(T)) - 1)) ;
}
// Use the current block of threads to sum over a given column of a matrix. The selected
// column is given by the thread block index in the block grid.
//
// This function uses an amount of scratch memory equal to blockSize*sizeof(T)
// where blockSize=blockDim.x.
template<typename T>
__forceinline__ __device__ T matrixSumHelper(T const * matrix, int numRows)
{
// One thread block per column to sum
// Shared memory is per-block; it holds blockSize intermediate results
//extern __shared__ T scratch [] ;
SharedMemory<T> smem ;
T * scratch = smem.getPointer() ;
int tid = threadIdx.x ;
int column = blockIdx.x ;
int blockSize = blockDim.x ;
// Note that scratch is different for different blocks, hence for
// different matrix columns. Now fill scratch with partial sums, in a sliding-window
// manner.
scratch[tid] = 0 ;
T const * columnBegin = matrix + column * numRows ;
T const * columnEnd = columnBegin + numRows ;
T const * block = (T const*) getBlockBeginning<T>(columnBegin) + tid ;
while (block < columnEnd) {
if (block >= columnBegin) {
scratch[tid] += *block ;
}
block += blockSize ;
}
// Now scratch[] has blockSize partial sums for this column
// Finish by reducing and saving
blockReduce<T>(scratch, tid, blockSize, numRows) ;
return scratch[0] ;
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* compute_moments */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// This kernel accumulates means and variances for the data.
// Each block of threads sums over one or more data planes, resulting
// in an array accumulator[] of dimension numChunks x 2*numChannels.
//
// If each thread block scans all the images, then numChunks = 1.
// However, for efficiency different thread blocks do different
// subset of images, resulting in numChunks partial results to be summed
// later by a second kernel.
//
// The first part accumulator[:,0:numChannels-1] stores the data for the mean
// and the second part accumulator[:,numChannels:2*numChannels-1] the data
// for the sigmas.
//
// This function uses the sliding-window summing technique described
// above. It requires
//
// 2*sizeof(T)*blockSize
//
// bytes of shared scratch memory to hold partial sums for
// means and sigmas.
template<typename T>
__global__ void accumulate_moments_partial(T * accumulator,
T const * data,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s [] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
mdata[tid] += x ;
sdata[tid] += x * x ;
}
block += blockSize ;
}
plane += planeStride ;
}
blockReduce2<T>(sdata, mdata, tid, blockSize, planeArea) ;
if (tid == 0) {
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
accumulator[i] = mdata[0];
accumulator[i + gridDim.x] = sdata[0];
}
}
// This kernel sums over the accumulator computed by the function
// above to obtain the moments.
//
// This kernel uses matrixSumHelper() defined above. Hence:
//
// 1. The block grid must be set to have a block
// for each column of accumulator[]. There are here 2*numChannels columns.
//
// 2. There can be any (reasonable) blockSize. Blocks will iterate
//    over rows as needed to compute the operation.
//
// 3. It must be called with `blockSize*sizeof(T)` shared
// scratch space.
template<typename T>
__global__ void accumulate_moments_finish(T * moments,
T const * accumulator,
int numRows)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numRows) ;
if (tid == 0) {
moments[column] = x ;
}
}
// After accumulation, we need to renormalize the moments.
//
// 1. It should be called with enough threads to cover all
// numChannels in the moments.
//
// 2. The actual number of blocks is determined based on the block
//    size to satisfy condition (1).
template<typename T>
__global__ void normalize_moments(T * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
int unsigned i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < numChannels){
// max(0, __) is for numerical issues
T mean = moments[i] / mass ;
T sigma2 = max((T).0, moments[i + numChannels]/mass - mean*mean) ;
moments[i] = mean ;
moments[i + numChannels] = sqrt(sigma2 + epsilon);
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* compute_ders */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// Same as accumulate_moments above. Call with:
//
// 1. 2*sizeof(T)*blockSize bytes of shared scratch memory to hold
//    partial sums for derMultipliers and derBiases.
//
// Below, either accumulator is not NULL and derMultipliers and derBiases
// are, or the function is run in a `final' mode, with accumulator set
// to NULL and the other pointers set to their `final' destination.
template<typename T>
__global__ void accumulate_ders_partial
(T * accumulator,
T * derMultipliers,
T * derBiases,
T const * data,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s[] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
T dy = *dblock ;
mdata[tid] += x * dy ;
sdata[tid] += dy ;
}
block += blockSize ;
dblock += blockSize ;
}
plane += planeStride ;
}
blockReduce2<T>(sdata, mdata, tid, blockSize, planeArea);
if (tid == 0) {
if (numChannels == gridDim.x) {
// Final output ready
derMultipliers[blockIdx.x] = mdata[0];
derBiases[blockIdx.x] = sdata[0];
} else {
// Partially accumulated output
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
accumulator[i] = mdata[0]; // derMultipliers
accumulator[i + gridDim.x] = sdata[0]; // derBiases
}
}
}
template<typename T>
__global__ void accumulate_ders_finish(T * derMultipliers,
T * derBiases,
T const * accumulator,
int numChunks,
int numChannels)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numChunks) ;
if (tid == 0) {
// Recall that the matrix stores in order [derMultipliers derBiases]
// containing two types of data
int type = column / numChannels ;
int channel = column % numChannels ;
if (type == 0) {
derMultipliers[channel] = x ;
}
else {
derBiases[channel] = x ;
}
}
}
template<typename T>
__global__ void normalize_ders(T * derMultipliers,
T const * derBiases,
T const * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < numChannels){
T mean = moments[idx] ;
T sigma = moments[idx + numChannels] ;
derMultipliers[idx] = (derMultipliers[idx] - mean*derBiases[idx]) / sigma ;
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* compute_ders_and_moments */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// Same as accumulate_moments above. Call with:
//
// 1. 4*sizeof(T)*blockSize bytes of shared scratch memory to hold
//    partial sums for derMultipliers, derBiases, means and sigmas.
//
// Below, either accumulator is not NULL and derMultipliers, derBiases,
// and moments are, or the function is run in a `final' mode,
// with accumulator set to NULL, and the other pointers set to their
// `final' destination.
template<typename T>
__global__ void accumulate_ders_and_moments_partial
(T * accumulator,
T * derMultipliers,
T * derBiases,
T * moments,
T const * data,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s[] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
T * rdata = sdata + blockSize ;
T * tdata = rdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
rdata[tid] = 0 ;
tdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
T dy = *dblock ;
mdata[tid] += x * dy ;
sdata[tid] += dy ;
rdata[tid] += x * x ;
tdata[tid] += x ;
}
block += blockSize ;
dblock += blockSize ;
}
plane += planeStride ;
}
blockReduce4<T>(sdata, mdata, rdata, tdata, tid, blockSize, planeArea);
if (tid == 0) {
if (numChannels == gridDim.x) {
// Final output ready
derMultipliers[blockIdx.x] = mdata[0];
derBiases[blockIdx.x] = sdata[0];
moments[blockIdx.x] = tdata[0];
moments[blockIdx.x+numChannels] = rdata[0];
} else {
// Partially accumulated output
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
accumulator[i] = mdata[0]; // derMultipliers
accumulator[i + gridDim.x] = sdata[0]; // derBiases
accumulator[i + 2*gridDim.x] = tdata[0]; // means
accumulator[i + 3*gridDim.x] = rdata[0]; // sigmas
}
}
}
template<typename T>
__global__ void accumulate_ders_and_moments_finish(T * derMultipliers,
T * derBiases,
T * moments,
T const * accumulator,
int numChunks,
int numChannels)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numChunks) ;
if (tid == 0) {
// Recall that the matrix stores in order [derMultipliers derBiases means sigmas]
// containing four types of data
int type = column / numChannels ;
int channel = column % numChannels ;
if (type == 0) {
derMultipliers[channel] = x ;
}
else if (type == 1) {
derBiases[channel] = x ;
}
else if (type == 2) {
moments[channel] = x ;
}
else {
moments[channel + numChannels] = x ;
}
}
}
template<typename T>
__global__ void normalize_ders_and_moments(T * derMultipliers,
T * derBiases,
T * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < numChannels){
T mean = moments[idx] / mass;
T sigma2 = max((T).0, moments[idx + numChannels]/mass - mean*mean) ;
T sigma = sqrt(sigma2 + epsilon);
moments[idx] = mean ;
moments[idx + numChannels] = sigma ;
derMultipliers[idx] = (derMultipliers[idx]-mean*derBiases[idx]) / sigma ;
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* forward and backward */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
// Call this kernel like compute_moments, but it does not need a scratch space
template<typename T>
__global__ void batch_normalize_forward(T * outputData,
T const * moments,
T const * data,
T const * multipliers,
T const * biases,
int planeArea,
int numPlanes,
int numChannels)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
// Not optimized for compute capability < 1.2
T mean = moments[channel];
T sigma = moments[channel+numChannels];
T multiplier = multipliers[channel];
T bias = biases[channel];
T coefficient = multiplier / sigma ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T * oblock = outputData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = coefficient * (*block - mean) + bias ;
}
block += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
template<typename T>
__global__ void batch_normalize_backward(T * derData,
T const * moments,
T const * data,
T const * multipliers,
T const * derMultipliers,
T const * derBiases,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
T mass)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
// Not optimized for compute capability < 1.2
T mu = moments[channel];
T sigma = moments[channel + numChannels] ;
T multiplier = multipliers[channel] ;
T derMultiplier = derMultipliers[channel] ;
T muz = derBiases[channel] / mass;
T G1 = multiplier / sigma ;
T G2 = G1 * derMultiplier / (mass*sigma);
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T> (planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
T * oblock = derData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = G1 * (*dblock - muz) - G2 * (*block - mu);
}
block += blockSize ;
dblock += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* bnorm interface */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template<typename T>
struct bnorm<vl::VLDT_GPU, T>
{
/* ------------------------------------------------------------ */
/* forward */
/* ------------------------------------------------------------ */
static vl::ErrorCode
forward(Context& context,
T* output,
T* moments,
T const* data,
T const* multipliers,
T const* biases,
size_t height, size_t width, size_t depth, size_t size,
T epsilon)
{
cudaError_t status ;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
// # Block size
//
// The block size is a multiple of the warp size, and generally
// as large as possible. However, we should avoid making the block
// size too much larger than the area of a plane. In fact,
// blocks process one plane at a time and would be required to discard
// a lot of work in this case.
unsigned int blockSize = getBlockSize(planeArea) ;
// Each channel is processed by one or more blocks.
// There are numChunks >= 1 blocks per channel, each working
// on a subset of one or more images. There are
//
// gridSize = numChunks * depth
//
// blocks in the grid.
//
// We select numChunks to satisfy the following constraints:
//
// 1. There must be at least one block per channel:
//
// numChunks >= 1
//
// 2. There must be at most one block per image:
//
// numChunks <= size
//
// 3. The grid size must be less than 65536 (CUDA limit)
//
// numChunks <= 65536 / depth
//
// Note that constraints (1) and (3) can be satisfied only if
// depth <= 65536. This is usually not a problem, but may fail
// in general.
//
// In general, (1--3) can be satisfied by setting numChunks=1.
// However, this is suboptimal if there are too many operations
// per block.
//
// We would like to do at most
//
// L = 10e3 * blockSize
//
// operations per block and each block does
//
// (planeArea * size)/numChunks
//
// operation. Thus the target value for numChunks is
//
// numChunks = ceil((planeArea * size) / L).
//
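      // As a purely illustrative example: with planeArea = 56*56 = 3136,
      // size = 64 and blockSize = 256, L = 2,560,000 and
      // numChunks = ceil(3136*64 / 2,560,000) = 1, so a single chunk per
      // channel already keeps each block within the target amount of work.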
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
if (numChunks > 1) {
// We need:
//
// * The `accumulator[]` buffer which has size (numChunks x 2*depth) = 2*gridSize
// elements to store the partial moments.
//
// * Potentially, space for moments[], which has size 2 x depth.
unsigned int accumulatorSize = 2 * nextMultipleOf(gridSize, WARP_SIZE) ;
unsigned int workspaceSize = accumulatorSize + (moments ? 0 : 2 * depth) ;
T * workspace = (T*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(T)) ;
T * accumulator = workspace;
if (moments == NULL) {
moments = workspace + accumulatorSize ;
}
// Accumulate partial moment summations
accumulate_moments_partial <<<gridSize, blockSize, 2*blockSize*sizeof(T)>>>
(accumulator,
data,
planeArea,
numPlanes,
depth,
numChunks) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Sum over the chunks (rows of accumulator[])
int blockSizeSum = getBlockSize(numChunks) ;
accumulate_moments_finish <<<2*depth, blockSizeSum, blockSizeSum*sizeof(T)>>>
(moments, accumulator, numChunks) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
} else {
if (moments == NULL) {
moments = (T*) context.getWorkspace(vl::VLDT_GPU, 2*depth * sizeof(T)) ;
}
accumulate_moments_partial <<<gridSize, blockSize, 2*blockSize*sizeof(T)>>>
(moments,
data,
planeArea,
numPlanes,
depth,
1) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
}
T mass = planeArea*size;
normalize_moments <<<divideAndRoundUp(depth,blockSize),blockSize>>>
(moments, depth, mass, epsilon) ;
// Finally, normalize the data
batch_normalize_forward <<<gridSize, blockSize>>>
(output,
moments, data, multipliers, biases,
planeArea,
numPlanes,
depth) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
/* ------------------------------------------------------------ */
/* forward_given_moments */
/* ------------------------------------------------------------ */
static vl::ErrorCode
forward_given_moments(Context& context,
T* output,
T const* moments,
T const* data,
T const* multipliers,
T const* biases,
size_t height, size_t width, size_t depth, size_t size)
{
cudaError_t status ;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
batch_normalize_forward <<<gridSize, blockSize>>>
(output,
moments, data, multipliers, biases,
planeArea,
numPlanes,
depth) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
/* ------------------------------------------------------------ */
/* backward */
/* ------------------------------------------------------------ */
static vl::ErrorCode
backward(Context& context,
T* derData,
T* derMultipliers,
T* derBiases,
T* moments,
T const* data,
T const* multipliers,
T const* biases,
T const* derOutput,
size_t height, size_t width, size_t depth, size_t size,
T epsilon)
{
cudaError_t status = cudaSuccess;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
if (numChunks > 1) {
unsigned int accumulatorSize = 4 * nextMultipleOf(gridSize, WARP_SIZE) ;
unsigned int workspaceSize = accumulatorSize + (moments ? 0 : 2 * depth) ;
T * workspace = (T*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(T)) ;
T * accumulator = workspace;
if (moments == NULL) {
moments = workspace + accumulatorSize ;
}
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Mean, variance, derMultipliers and derBiases computation
accumulate_ders_and_moments_partial<T> <<<gridSize, blockSize, 4*blockSize*sizeof(T)>>>
(accumulator,
NULL, NULL, NULL,
data,
derOutput,
planeArea,
numPlanes,
depth,
numChunks) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Sum over the chunks (rows of accumulator[])
int blockSizeSum = getBlockSize(numChunks) ;
accumulate_ders_and_moments_finish<T> <<<4*depth, blockSizeSum, blockSizeSum*sizeof(T)>>>
(derMultipliers, derBiases, moments, accumulator, numChunks, depth) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
} else {
if (moments == NULL) {
moments = (T*) context.getWorkspace(vl::VLDT_GPU, 2*depth * sizeof(T)) ;
}
accumulate_ders_and_moments_partial<T> <<<gridSize, blockSize, 4*blockSize*sizeof(T)>>>
(NULL,
derMultipliers, derBiases, moments,
data,
derOutput,
planeArea,
numPlanes,
depth,
1) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
}
T mass = planeArea*size;
normalize_ders_and_moments<T> <<<divideAndRoundUp(depth,blockSize),blockSize>>>
(derMultipliers, derBiases, moments, depth, mass, epsilon) ;
// Compute output
batch_normalize_backward<T> <<<gridSize, blockSize>>>
(derData,
moments, data,
multipliers, derMultipliers, derBiases, derOutput,
planeArea, numPlanes, depth,
mass) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
/* ------------------------------------------------------------ */
/* backward_given_moments */
/* ------------------------------------------------------------ */
static vl::ErrorCode
backward_given_moments(Context& context,
T* derData,
T* derMultipliers,
T* derBiases,
T const* moments,
T const* data,
T const* multipliers,
T const* biases,
T const* derOutput,
size_t height, size_t width, size_t depth, size_t size,
T epsilon)
{
cudaError_t status;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
const unsigned int L = 10000 * blockSize ;
unsigned int numChunks = (planeArea * size + L - 1) / L ;
numChunks = min(numChunks, size) ;
numChunks = min(numChunks, 65536 / depth) ;
numChunks = max(numChunks, 1) ;
numChunks = 1 ; // <-- to be removed
unsigned int gridSize = depth * numChunks ;
assert(numChunks >= 1) ;
assert(numChunks <= size) ;
assert(gridSize <= 65536) ;
if (numChunks > 1) {
unsigned int workspaceSize = 2 * nextMultipleOf(gridSize, WARP_SIZE) ;
T * accumulator = (T*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(T)) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Mean, variance, derMultipliers and derBiases computation
accumulate_ders_partial<T> <<<gridSize, blockSize, 2*blockSize*sizeof(T)>>>
(accumulator,
NULL, NULL,
data,
derOutput,
planeArea,
numPlanes,
depth,
numChunks) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Sum over the chunks (rows of accumulator[])
int blockSizeSum = getBlockSize(numChunks) ;
accumulate_ders_finish<T> <<<2*depth, blockSizeSum, blockSizeSum*sizeof(T)>>>
(derMultipliers, derBiases, accumulator, numChunks, depth) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
} else {
accumulate_ders_partial<T> <<<gridSize, blockSize, 2*blockSize*sizeof(T)>>>
(NULL,
derMultipliers, derBiases,
data,
derOutput,
planeArea,
numPlanes,
depth,
1) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
}
T mass = planeArea*size;
normalize_ders<T> <<<divideAndRoundUp(depth,blockSize),blockSize>>>
(derMultipliers, derBiases, moments, depth, mass, epsilon) ;
// Compute output
batch_normalize_backward<T> <<<gridSize, blockSize>>>
(derData,
moments, data,
multipliers, derMultipliers, derBiases, derOutput,
planeArea, numPlanes, depth,
mass) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // struct bnorm
} } // namespace vl::impl
template struct vl::impl::bnorm<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::bnorm<vl::VLDT_GPU, double> ;
#endif
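// Usage sketch (illustrative only; assumes device pointers of matching sizes
// and a properly initialised vl::Context named `context`):
//
//   vl::ErrorCode err = vl::impl::bnorm<vl::VLDT_GPU, float>::forward(
//       context, output, moments, data, multipliers, biases,
//       height, width, depth, size, 1e-4f) ;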
|
9b0aa613b0dfb9748daddea358bb801cd3fcd22d.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <sys/time.h>
using namespace cv;
const int height = 480;
const int width = 854;
/*****************************************************************************
 * kernel
*****************************************************************************/
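// DownSampleRGBAImageKernel: grid-stride bilinear downsampling. Each thread
// maps one or more destination pixels back into the source image, reads the
// four neighbouring source pixels (a, b, c, d) for every channel and blends
// them with the fractional weights x_weight and y_weight.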
__global__ void DownSampleRGBAImageKernel(uint8_t *src_m, uint8_t *dst_m,
int src_row, int src_col,
int dst_row, int dst_col)
{
int div = (width * height + blockDim.x * gridDim.x - 1) / (blockDim.x * gridDim.x);
for (int l = 0; l < div; l++)
{
int ind = blockDim.x * blockIdx.x + threadIdx.x + l * (blockDim.x * gridDim.x);
if (ind >= dst_row * dst_col)
return;
int cn = 3;
int image_row = ind / dst_col;
int image_col = ind % dst_col;
        // cast to float so integer division does not truncate the scale factors
        float x_ratio = (float)(src_col - 1) / (dst_col - 1);
        float y_ratio = (float)(src_row - 1) / (dst_row - 1);
uint8_t a, b, c, d, pixel;
int x_l = floor(x_ratio * image_col), y_l = floor(y_ratio * image_row);
int x_h = ceil(x_ratio * image_col), y_h = ceil(y_ratio * image_row);
float x_weight = (x_ratio * image_col) - x_l;
float y_weight = (y_ratio * image_row) - y_l;
for (int k = 0; k < cn; k++)
{
a = src_m[y_l * src_col * cn + x_l * cn + k];
b = src_m[y_l * src_col * cn + x_h * cn + k];
c = src_m[y_h * src_col * cn + x_l * cn + k];
d = src_m[y_h * src_col * cn + x_h * cn + k];
pixel = (a & 0xff) * (1 - x_weight) * (1 - y_weight) + (b & 0xff) * x_weight * (1 - y_weight) + (c & 0xff) * y_weight * (1 - x_weight) + (d & 0xff) * x_weight * y_weight;
dst_m[(image_row * dst_col + image_col) * cn + k] = pixel;
}
}
//printf("Blue value: %d", pixelPtr[i*img.cols*cn + j*cn + 0] );
}
/*****************************************************************************
 * Main
*****************************************************************************/
int main(int argc, char **argv)
{
// ******************************************************* Vars initialization
int blocksPerGrid, threadsPerBlock, totalThreads;
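    // Expected arguments: argv[1] = input image path, argv[2] = output image
    // path, argv[3] = total thread count (0 = derive the launch configuration
    // from the device properties), argv[4] = block count.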
std::string image_path = argv[1];
std::string image_out_path = argv[2];
int n_threads = atoi(argv[3]);
int n_blocks = atoi(argv[4]);
Mat img;
uint8_t *resized, *d_resized, *d_img;
hipError_t err = hipSuccess;
struct timeval tval_before, tval_after, tval_result;
// ******************************************************* get device info
hipSetDevice(0);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
int cores_mp = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
if (n_threads != 0)
blocksPerGrid = n_blocks;
else
blocksPerGrid = deviceProp.multiProcessorCount;
if (n_threads != 0)
threadsPerBlock = n_threads/n_blocks;
else
threadsPerBlock = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
// 2;//(width * height + threadsPerBlock - 1) / threadsPerBlock;
// ******************************************************* Read Matrix and declare resized matrix
//printf("block,threads,time\n");
img = imread(image_path, IMREAD_COLOR);
if (img.empty())
{
std::cout << "Could not read the image: " << image_path << std::endl;
return 1;
}
resized = (uint8_t *)malloc(img.channels() * height * width * sizeof(uint8_t));
//******************************************************* device matrix declaration
err = hipMalloc((void **)&d_img, img.rows * img.step);
if (err != hipSuccess)
{
        fprintf(stderr, "Failed to allocate device memory for the input image (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_resized, img.channels() * height * width * sizeof(uint8_t));
if (err != hipSuccess)
{
        fprintf(stderr, "Failed to allocate device memory for the resized image (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// ******************************************************* copy from img to d_img
uint8_t *pixelPtr = (uint8_t *)img.data;
err = hipMemcpy(d_img, pixelPtr, img.channels() * img.cols * img.rows * sizeof(uint8_t), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
        fprintf(stderr, "Failed to copy the input image from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// ******************************************************* Execute kernel
gettimeofday(&tval_before, NULL); // get time
dim3 dim_grid(blocksPerGrid);
dim3 dim_block(threadsPerBlock);
hipLaunchKernelGGL(( DownSampleRGBAImageKernel), dim3(dim_grid), dim3(dim_block), 0, 0,
d_img, d_resized, img.rows, img.cols, height, width);
err = hipGetLastError();
// if (err == hipSuccess)
// {
// printf("All ok!");
// }
if (err != hipSuccess)
{
        fprintf(stderr, "Failed to launch DownSampleRGBAImageKernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Time calculation
gettimeofday(&tval_after, NULL);
timersub(&tval_after, &tval_before, &tval_result);
printf("%d,%d,%ld.%06ld\n", blocksPerGrid, threadsPerBlock * blocksPerGrid, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
// ******************************************************* Copy d_resized to resized
err = hipMemcpy(resized, d_resized, img.channels() * height * width * sizeof(uint8_t), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
        fprintf(stderr, "Failed to copy the resized image from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// ******************************************************* Matrix convertion to Mat
Mat resized_img(height, width, CV_8UC(3), resized);
imshow("Display window", resized_img);
int k = waitKey(0); // Wait for a keystroke in the window
if (k == 's')
{
imwrite(image_out_path, resized_img);
}
return 0;
}
| 9b0aa613b0dfb9748daddea358bb801cd3fcd22d.cu | #include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <sys/time.h>
using namespace cv;
const int height = 480;
const int width = 854;
/*****************************************************************************
 * kernel
*****************************************************************************/
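// DownSampleRGBAImageKernel: grid-stride bilinear downsampling. Each thread
// maps one or more destination pixels back into the source image, reads the
// four neighbouring source pixels (a, b, c, d) for every channel and blends
// them with the fractional weights x_weight and y_weight.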
__global__ void DownSampleRGBAImageKernel(uint8_t *src_m, uint8_t *dst_m,
int src_row, int src_col,
int dst_row, int dst_col)
{
int div = (width * height + blockDim.x * gridDim.x - 1) / (blockDim.x * gridDim.x);
for (int l = 0; l < div; l++)
{
int ind = blockDim.x * blockIdx.x + threadIdx.x + l * (blockDim.x * gridDim.x);
if (ind >= dst_row * dst_col)
return;
int cn = 3;
int image_row = ind / dst_col;
int image_col = ind % dst_col;
        // cast to float so integer division does not truncate the scale factors
        float x_ratio = (float)(src_col - 1) / (dst_col - 1);
        float y_ratio = (float)(src_row - 1) / (dst_row - 1);
uint8_t a, b, c, d, pixel;
int x_l = floor(x_ratio * image_col), y_l = floor(y_ratio * image_row);
int x_h = ceil(x_ratio * image_col), y_h = ceil(y_ratio * image_row);
float x_weight = (x_ratio * image_col) - x_l;
float y_weight = (y_ratio * image_row) - y_l;
for (int k = 0; k < cn; k++)
{
a = src_m[y_l * src_col * cn + x_l * cn + k];
b = src_m[y_l * src_col * cn + x_h * cn + k];
c = src_m[y_h * src_col * cn + x_l * cn + k];
d = src_m[y_h * src_col * cn + x_h * cn + k];
pixel = (a & 0xff) * (1 - x_weight) * (1 - y_weight) + (b & 0xff) * x_weight * (1 - y_weight) + (c & 0xff) * y_weight * (1 - x_weight) + (d & 0xff) * x_weight * y_weight;
dst_m[(image_row * dst_col + image_col) * cn + k] = pixel;
}
}
//printf("Blue value: %d", pixelPtr[i*img.cols*cn + j*cn + 0] );
}
/*****************************************************************************
 * Main
*****************************************************************************/
int main(int argc, char **argv)
{
// ******************************************************* Vars initialization
int blocksPerGrid, threadsPerBlock, totalThreads;
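    // Expected arguments: argv[1] = input image path, argv[2] = output image
    // path, argv[3] = total thread count (0 = derive the launch configuration
    // from the device properties), argv[4] = block count.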
std::string image_path = argv[1];
std::string image_out_path = argv[2];
int n_threads = atoi(argv[3]);
int n_blocks = atoi(argv[4]);
Mat img;
uint8_t *resized, *d_resized, *d_img;
cudaError_t err = cudaSuccess;
struct timeval tval_before, tval_after, tval_result;
// ******************************************************* get device info
cudaSetDevice(0);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int cores_mp = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
if (n_threads != 0)
blocksPerGrid = n_blocks;
else
blocksPerGrid = deviceProp.multiProcessorCount;
if (n_threads != 0)
threadsPerBlock = n_threads/n_blocks;
else
threadsPerBlock = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
// 2;//(width * height + threadsPerBlock - 1) / threadsPerBlock;
// ******************************************************* Read Matrix and declare resized matrix
//printf("block,threads,time\n");
img = imread(image_path, IMREAD_COLOR);
if (img.empty())
{
std::cout << "Could not read the image: " << image_path << std::endl;
return 1;
}
resized = (uint8_t *)malloc(img.channels() * height * width * sizeof(uint8_t));
//******************************************************* device matrix declaration
err = cudaMalloc((void **)&d_img, img.rows * img.step);
if (err != cudaSuccess)
{
        fprintf(stderr, "Failed to allocate device memory for the input image (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_resized, img.channels() * height * width * sizeof(uint8_t));
if (err != cudaSuccess)
{
        fprintf(stderr, "Failed to allocate device memory for the resized image (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// ******************************************************* copy from img to d_img
uint8_t *pixelPtr = (uint8_t *)img.data;
err = cudaMemcpy(d_img, pixelPtr, img.channels() * img.cols * img.rows * sizeof(uint8_t), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
        fprintf(stderr, "Failed to copy the input image from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// ******************************************************* Execute kernel
gettimeofday(&tval_before, NULL); // get time
dim3 dim_grid(blocksPerGrid);
dim3 dim_block(threadsPerBlock);
DownSampleRGBAImageKernel<<<dim_grid, dim_block>>>(
d_img, d_resized, img.rows, img.cols, height, width);
err = cudaGetLastError();
// if (err == cudaSuccess)
// {
// printf("All ok!");
// }
if (err != cudaSuccess)
{
        fprintf(stderr, "Failed to launch DownSampleRGBAImageKernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
    // Time calculation: wait for the asynchronous kernel to finish before reading
    // the clock, otherwise only the launch overhead is measured.
    cudaDeviceSynchronize();
    gettimeofday(&tval_after, NULL);
timersub(&tval_after, &tval_before, &tval_result);
printf("%d,%d,%ld.%06ld\n", blocksPerGrid, threadsPerBlock * blocksPerGrid, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
// ******************************************************* Copy d_resized to resized
err = cudaMemcpy(resized, d_resized, img.channels() * height * width * sizeof(uint8_t), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
    // ******************************************************* Matrix conversion to Mat
Mat resized_img(height, width, CV_8UC(3), resized);
imshow("Display window", resized_img);
int k = waitKey(0); // Wait for a keystroke in the window
if (k == 's')
{
imwrite(image_out_path, resized_img);
}
return 0;
}
|
893d0d6b8bf60c6880345bcd4099958d9dece189.hip | // !!! This is a file automatically generated by hipify!!!
/*
* FluxLimiter.cu
*
* Created on: Oct 22, 2015
* Author: bazow
*/
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "../include/FluxLimiter.cuh"
#include "../include/DynamicalVariables.cuh"
#define THETA 2.0
__device__
inline PRECISION sign(PRECISION x) {
if (x<0) return -1;
else return 1;
}
__device__
inline PRECISION minmod(PRECISION x, PRECISION y) {
return (sign(x)+sign(y))*fminf(fabsf(x),fabsf(y))/2;
}
__device__
PRECISION minmod3(PRECISION x, PRECISION y, PRECISION z) {
return minmod(x, minmod(y,z));
}
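// approximateDerivative implements the generalized minmod (theta) slope limiter:
// it returns the smallest-magnitude value among the scaled one-sided differences
// and the centered difference, and zero when their signs disagree, which
// suppresses spurious oscillations near steep gradients.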
__device__
PRECISION approximateDerivative(PRECISION x, PRECISION y, PRECISION z) {
PRECISION left = THETA * (y - x);
PRECISION ctr = (z - x) / 2;
PRECISION right = THETA * (z - y);
return minmod3(left, ctr, right);
}
| 893d0d6b8bf60c6880345bcd4099958d9dece189.cu | /*
* FluxLimiter.cu
*
* Created on: Oct 22, 2015
* Author: bazow
*/
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "../include/FluxLimiter.cuh"
#include "../include/DynamicalVariables.cuh"
#define THETA 2.0
__device__
inline PRECISION sign(PRECISION x) {
if (x<0) return -1;
else return 1;
}
__device__
inline PRECISION minmod(PRECISION x, PRECISION y) {
return (sign(x)+sign(y))*fminf(fabsf(x),fabsf(y))/2;
}
__device__
PRECISION minmod3(PRECISION x, PRECISION y, PRECISION z) {
return minmod(x, minmod(y,z));
}
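// approximateDerivative implements the generalized minmod (theta) slope limiter:
// it returns the smallest-magnitude value among the scaled one-sided differences
// and the centered difference, and zero when their signs disagree, which
// suppresses spurious oscillations near steep gradients.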
__device__
PRECISION approximateDerivative(PRECISION x, PRECISION y, PRECISION z) {
PRECISION left = THETA * (y - x);
PRECISION ctr = (z - x) / 2;
PRECISION right = THETA * (z - y);
return minmod3(left, ctr, right);
}
|
96e6e63315ae15a7079ec3f0e30e7639bbe4ed0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2016, National University of Defense Technology
// Author: Xuhao Chen <[email protected]>
#define BFS_VARIANT "atomic_free"
#include "bfs.h"
#include "cuda_launch_config.hpp"
#include "cutil_subset.h"
#include "timer.h"
__global__ void initialize(int m, int source, bool *visited, bool *expanded) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
expanded[id] = false;
if(id == source) visited[id] = true;
else visited[id] = false;
}
}
__global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, bool *changed, bool *visited, bool *expanded, int *num_frontier, int depth) {
int src = blockIdx.x * blockDim.x + threadIdx.x;
if(src < m && visited[src] && !expanded[src]) { // visited but not expanded
expanded[src] = true;
//atomicAdd(num_frontier, 1);
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
if (dist[dst] > depth) {
dist[dst] = depth;
*changed = true;
}
}
}
}
__global__ void bfs_update(int m, DistT *dist, bool *visited) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
if(dist[id] < MYINFINITY && !visited[id])
visited[id] = true;
}
}
void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *h_degree, DistT *h_dist) {
//print_device_info(0);
DistT zero = 0;
int *d_row_offsets, *d_column_indices;
CUDA_SAFE_CALL(hipMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_column_indices, nnz * sizeof(int)));
CUDA_SAFE_CALL(hipMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), hipMemcpyHostToDevice));
DistT * d_dist;
CUDA_SAFE_CALL(hipMalloc((void **)&d_dist, m * sizeof(DistT)));
CUDA_SAFE_CALL(hipMemcpy(d_dist, h_dist, m * sizeof(DistT), hipMemcpyHostToDevice));
bool *d_changed, h_changed, *d_visited, *d_expanded;
CUDA_SAFE_CALL(hipMalloc((void **)&d_changed, sizeof(bool)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_visited, m * sizeof(bool)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_expanded, m * sizeof(bool)));
//CUDA_SAFE_CALL(hipMemset(d_visited, 0, m * sizeof(bool)));
//CUDA_SAFE_CALL(hipMemset(d_expanded, 0, m * sizeof(bool)));
	int *d_num_frontier = NULL; // frontier counter is disabled below; keep NULL so the final hipFree is a no-op
//CUDA_SAFE_CALL(hipMalloc((void **)&d_num_frontier, sizeof(int)));
int iter = 0;
int nthreads = BLOCK_SIZE;
int nblocks = (m - 1) / nthreads + 1;
hipLaunchKernelGGL(( initialize) , dim3(nblocks), dim3(nthreads), 0, 0, m, source, d_visited, d_expanded);
CudaTest("initializing failed");
CUDA_SAFE_CALL(hipMemcpy(&d_dist[source], &zero, sizeof(DistT), hipMemcpyHostToDevice));
//int h_num_frontier = 1;
Timer t;
t.Start();
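	// Level-synchronous BFS: each pass expands every visited-but-unexpanded
	// vertex at the current depth and relaxes its neighbours' distances;
	// the loop stops once an iteration makes no changes.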
do {
++ iter;
h_changed = false;
CUDA_SAFE_CALL(hipMemcpy(d_changed, &h_changed, sizeof(bool), hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(d_num_frontier, &zero, sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( bfs_kernel) , dim3(nblocks), dim3(nthreads), 0, 0, m, d_row_offsets, d_column_indices, d_dist, d_changed, d_visited, d_expanded, d_num_frontier, iter);
hipLaunchKernelGGL(( bfs_update) , dim3(nblocks), dim3(nthreads), 0, 0, m, d_dist, d_visited);
CudaTest("solving failed");
CUDA_SAFE_CALL(hipMemcpy(&h_changed, d_changed, sizeof(bool), hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(&h_num_frontier, d_num_frontier, sizeof(int), hipMemcpyDeviceToHost));
//printf("iteration %d: num_frontier = %d\n", iter, h_num_frontier);
} while (h_changed);
CUDA_SAFE_CALL(hipDeviceSynchronize());
t.Stop();
printf("\titerations = %d.\n", iter);
printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs());
CUDA_SAFE_CALL(hipMemcpy(h_dist, d_dist, m * sizeof(DistT), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(d_row_offsets));
CUDA_SAFE_CALL(hipFree(d_column_indices));
CUDA_SAFE_CALL(hipFree(d_dist));
CUDA_SAFE_CALL(hipFree(d_changed));
CUDA_SAFE_CALL(hipFree(d_num_frontier));
return;
}
| 96e6e63315ae15a7079ec3f0e30e7639bbe4ed0e.cu | // Copyright 2016, National University of Defense Technology
// Author: Xuhao Chen <[email protected]>
#define BFS_VARIANT "atomic_free"
#include "bfs.h"
#include "cuda_launch_config.hpp"
#include "cutil_subset.h"
#include "timer.h"
__global__ void initialize(int m, int source, bool *visited, bool *expanded) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
expanded[id] = false;
if(id == source) visited[id] = true;
else visited[id] = false;
}
}
__global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, bool *changed, bool *visited, bool *expanded, int *num_frontier, int depth) {
int src = blockIdx.x * blockDim.x + threadIdx.x;
if(src < m && visited[src] && !expanded[src]) { // visited but not expanded
expanded[src] = true;
//atomicAdd(num_frontier, 1);
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
if (dist[dst] > depth) {
dist[dst] = depth;
*changed = true;
}
}
}
}
__global__ void bfs_update(int m, DistT *dist, bool *visited) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < m) {
if(dist[id] < MYINFINITY && !visited[id])
visited[id] = true;
}
}
void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *h_degree, DistT *h_dist) {
//print_device_info(0);
DistT zero = 0;
int *d_row_offsets, *d_column_indices;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_column_indices, nnz * sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), cudaMemcpyHostToDevice));
DistT * d_dist;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_dist, m * sizeof(DistT)));
CUDA_SAFE_CALL(cudaMemcpy(d_dist, h_dist, m * sizeof(DistT), cudaMemcpyHostToDevice));
bool *d_changed, h_changed, *d_visited, *d_expanded;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_changed, sizeof(bool)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_visited, m * sizeof(bool)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_expanded, m * sizeof(bool)));
//CUDA_SAFE_CALL(cudaMemset(d_visited, 0, m * sizeof(bool)));
//CUDA_SAFE_CALL(cudaMemset(d_expanded, 0, m * sizeof(bool)));
	int *d_num_frontier = NULL; // frontier counter is disabled below; keep NULL so the final cudaFree is a no-op
//CUDA_SAFE_CALL(cudaMalloc((void **)&d_num_frontier, sizeof(int)));
int iter = 0;
int nthreads = BLOCK_SIZE;
int nblocks = (m - 1) / nthreads + 1;
initialize <<<nblocks, nthreads>>> (m, source, d_visited, d_expanded);
CudaTest("initializing failed");
CUDA_SAFE_CALL(cudaMemcpy(&d_dist[source], &zero, sizeof(DistT), cudaMemcpyHostToDevice));
//int h_num_frontier = 1;
Timer t;
t.Start();
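	// Level-synchronous BFS: each pass expands every visited-but-unexpanded
	// vertex at the current depth and relaxes its neighbours' distances;
	// the loop stops once an iteration makes no changes.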
do {
++ iter;
h_changed = false;
CUDA_SAFE_CALL(cudaMemcpy(d_changed, &h_changed, sizeof(bool), cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(d_num_frontier, &zero, sizeof(int), cudaMemcpyHostToDevice));
bfs_kernel <<<nblocks, nthreads>>> (m, d_row_offsets, d_column_indices, d_dist, d_changed, d_visited, d_expanded, d_num_frontier, iter);
bfs_update <<<nblocks, nthreads>>> (m, d_dist, d_visited);
CudaTest("solving failed");
CUDA_SAFE_CALL(cudaMemcpy(&h_changed, d_changed, sizeof(bool), cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(&h_num_frontier, d_num_frontier, sizeof(int), cudaMemcpyDeviceToHost));
//printf("iteration %d: num_frontier = %d\n", iter, h_num_frontier);
} while (h_changed);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
t.Stop();
printf("\titerations = %d.\n", iter);
printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs());
CUDA_SAFE_CALL(cudaMemcpy(h_dist, d_dist, m * sizeof(DistT), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_row_offsets));
CUDA_SAFE_CALL(cudaFree(d_column_indices));
CUDA_SAFE_CALL(cudaFree(d_dist));
CUDA_SAFE_CALL(cudaFree(d_changed));
CUDA_SAFE_CALL(cudaFree(d_num_frontier));
return;
}
|
f3150468f6499315b5e685b297fa8ee3fe25ca73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
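// Tree-based block reduction with interleaved addressing: each thread loads one
// element into shared memory, pairs are summed with a stride that doubles every
// step, and thread 0 writes the block's partial sum to out_data. The per-block
// sums are added up on the host afterwards.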
__global__ void Reduce(int* in_data, int* out_data) {
extern __shared__ int shared_data[];
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
shared_data[tid] = in_data[index];
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (tid % (2 * s) == 0) {
shared_data[tid] += shared_data[tid + s];
}
__syncthreads();
}
if (tid == 0) {
out_data[blockIdx.x] = shared_data[0];
}
}
int main() {
const int block_size = 1024;
// __shared__ int shared_data[];
const int array_size = 1 << 20;
int* h_array = new int[array_size];
for (int i = 0; i < array_size; ++i) {
h_array[i] = 1;
}
int* d_array;
hipMalloc(&d_array, sizeof(int) * array_size);
hipMemcpy(d_array, h_array, sizeof(int) * array_size, hipMemcpyHostToDevice);
int num_blocks = array_size / block_size;
int* d_blocksum;
hipMalloc(&d_blocksum, sizeof(int) * num_blocks);
int* h_blocksum = new int[num_blocks];
hipEvent_t start;
hipEvent_t stop;
// Creating event
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( Reduce), dim3(num_blocks), dim3(block_size), sizeof(int) * block_size, 0, d_array, d_blocksum);
hipEventRecord(stop);
hipMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << " elapsed" << std::endl;
int sum = 0;
for (int i = 0; i < num_blocks; ++i) {
sum += h_blocksum[i];
}
std::cout << sum << std::endl;
hipFree(d_blocksum);
hipFree(d_array);
delete[] h_array;
delete[] h_blocksum;
}
| f3150468f6499315b5e685b297fa8ee3fe25ca73.cu | #include <iostream>
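// Tree-based block reduction with interleaved addressing: each thread loads one
// element into shared memory, pairs are summed with a stride that doubles every
// step, and thread 0 writes the block's partial sum to out_data. The per-block
// sums are added up on the host afterwards.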
__global__ void Reduce(int* in_data, int* out_data) {
extern __shared__ int shared_data[];
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
shared_data[tid] = in_data[index];
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (tid % (2 * s) == 0) {
shared_data[tid] += shared_data[tid + s];
}
__syncthreads();
}
if (tid == 0) {
out_data[blockIdx.x] = shared_data[0];
}
}
int main() {
const int block_size = 1024;
// __shared__ int shared_data[];
const int array_size = 1 << 20;
int* h_array = new int[array_size];
for (int i = 0; i < array_size; ++i) {
h_array[i] = 1;
}
int* d_array;
cudaMalloc(&d_array, sizeof(int) * array_size);
cudaMemcpy(d_array, h_array, sizeof(int) * array_size, cudaMemcpyHostToDevice);
int num_blocks = array_size / block_size;
int* d_blocksum;
cudaMalloc(&d_blocksum, sizeof(int) * num_blocks);
int* h_blocksum = new int[num_blocks];
cudaEvent_t start;
cudaEvent_t stop;
// Creating event
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
Reduce<<<num_blocks, block_size, sizeof(int) * block_size>>>(d_array, d_blocksum);
cudaEventRecord(stop);
cudaMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << " elapsed" << std::endl;
int sum = 0;
for (int i = 0; i < num_blocks; ++i) {
sum += h_blocksum[i];
}
std::cout << sum << std::endl;
cudaFree(d_blocksum);
cudaFree(d_array);
delete[] h_array;
delete[] h_blocksum;
}
|
1377053421dbea617c03f93d76c9e3d7767b5722.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call)\
{\
const hipError_t error = call;\
if (error != hipSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\
exit(1);\
}\
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.0e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-30;
bool match = 1;
for (int i=0; i<N; i++)
{
if ( abs(hostRef[i] - gpuRef[i]) > epsilon )
{
match = 0;
printf("Arrays do not match! \n");
printf("host: %5.2f, gpu: %5.2f at current %d \n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match)
printf("Array match.\n\n");
}
void initialData(float *ip, int size)
{
// Generate different seed for random number.
time_t t;
srand((unsigned int)time(&t));
for (int i=0; i<size; i++)
{
ip[i] = (float)(rand() & 0xFF )/10.0f;
}
}
void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
for (int idx=0; idx<N; idx++)
{
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Strarting...\n", argv[0]);
// set up device
int dev = 0;
hipSetDevice(dev);
// set up data size of vectors
int nElem = 1 << 27;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// start time
double time_start = cpuSecond();
// malloc gpu global memory
float *d_A, *d_B, *d_C;
hipMalloc((float **)&d_A, nBytes);
hipMalloc((float **)&d_B, nBytes);
hipMalloc((float **)&d_C, nBytes);
// transfer data from host to gpu
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
int iLen = 1024;
dim3 block (iLen);
dim3 grid ( (nElem + block.x - 1)/block.x );
hipLaunchKernelGGL(( sumArraysOnGPU) , dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C);
printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// gpu finished time
double time_gpu_finish = cpuSecond();
// add vector at host side for result check.
sumArrayOnHost(h_A, h_B, hostRef, nElem);
// cpu finished time
double time_cpu_finish = cpuSecond();
// Check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
printf("CPU job Done in %lf. \n", time_cpu_finish - time_gpu_finish);
printf("GPU job Done in %lf. \n", time_gpu_finish - time_start);
return(0);
}
| 1377053421dbea617c03f93d76c9e3d7767b5722.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(1);\
}\
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.0e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-30;
bool match = 1;
for (int i=0; i<N; i++)
{
if ( abs(hostRef[i] - gpuRef[i]) > epsilon )
{
match = 0;
printf("Arrays do not match! \n");
printf("host: %5.2f, gpu: %5.2f at current %d \n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match)
printf("Array match.\n\n");
}
void initialData(float *ip, int size)
{
// Generate different seed for random number.
time_t t;
srand((unsigned int)time(&t));
for (int i=0; i<size; i++)
{
ip[i] = (float)(rand() & 0xFF )/10.0f;
}
}
void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
for (int idx=0; idx<N; idx++)
{
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Strarting...\n", argv[0]);
// set up device
int dev = 0;
cudaSetDevice(dev);
// set up data size of vectors
int nElem = 1 << 27;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// start time
double time_start = cpuSecond();
// malloc gpu global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float **)&d_A, nBytes);
cudaMalloc((float **)&d_B, nBytes);
cudaMalloc((float **)&d_C, nBytes);
// transfer data from host to gpu
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// invoke kernel at host side
int iLen = 1024;
dim3 block (iLen);
dim3 grid ( (nElem + block.x - 1)/block.x );
sumArraysOnGPU <<<grid, block>>> (d_A, d_B, d_C);
printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
// copy kernel result back to host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// gpu finished time
double time_gpu_finish = cpuSecond();
// add vector at host side for result check.
sumArrayOnHost(h_A, h_B, hostRef, nElem);
// cpu finished time
double time_cpu_finish = cpuSecond();
// Check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
printf("CPU job Done in %lf. \n", time_cpu_finish - time_gpu_finish);
printf("GPU job Done in %lf. \n", time_gpu_finish - time_start);
return(0);
}
|
aadc9f429071092062af8d0d95242e8110dd31d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/init.h"
#include "common.h"
#include "cuda_utils.h"
#include <thrust/random.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
__global__ void initParticlesKernel(Particle* particle_array, int grid_size, int particle_count)
{
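	// Grid-stride loop over all particles: each particle is dropped into a random
	// grid cell, given a normally distributed velocity, and a uniform weight of
	// 1 / particle_count.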
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
unsigned int seed = hash(i);
thrust::default_random_engine rng(seed);
thrust::uniform_int_distribution<int> dist_idx(0, grid_size * grid_size);
thrust::normal_distribution<float> dist_vel(0.0f, 12.0f);
int index = dist_idx(rng);
float x = index % grid_size;
float y = index / grid_size;
particle_array[i].weight = 1.0 / static_cast<float>(particle_count);
particle_array[i].state = glm::vec4(x, y, dist_vel(rng), dist_vel(rng));
//printf("w: %f, x: %f, y: %f, vx: %f, vy: %f\n", particle_array[i].weight, particle_array[i].state[0], particle_array[i].state[1],
// particle_array[i].state[2], particle_array[i].state[3]);
}
}
__global__ void initGridCellsKernel(GridCell* grid_cell_array, MeasurementCell* meas_cell_array, int grid_size, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
const int x = i % grid_size;
const int y = i / grid_size;
grid_cell_array[i].pos = make_int2(x, y);
grid_cell_array[i].free_mass = 0.0f;
grid_cell_array[i].occ_mass = 0.0f;
//grid_cell_array[i].start_idx = -1;
//grid_cell_array[i].end_idx = -1;
meas_cell_array[i].occ_mass = 0.0f;
meas_cell_array[i].free_mass = 0.0f;
meas_cell_array[i].likelihood = 1.0f;
meas_cell_array[i].p_A = 1.0f;
}
}
__global__ void reinitGridParticleIndices(GridCell* grid_cell_array, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
grid_cell_array[i].start_idx = -1;
grid_cell_array[i].end_idx = -1;
}
}
| aadc9f429071092062af8d0d95242e8110dd31d9.cu | /*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/init.h"
#include "common.h"
#include "cuda_utils.h"
#include <thrust/random.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
__global__ void initParticlesKernel(Particle* particle_array, int grid_size, int particle_count)
{
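	// Grid-stride loop over all particles: each particle is dropped into a random
	// grid cell, given a normally distributed velocity, and a uniform weight of
	// 1 / particle_count.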
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
unsigned int seed = hash(i);
thrust::default_random_engine rng(seed);
thrust::uniform_int_distribution<int> dist_idx(0, grid_size * grid_size);
thrust::normal_distribution<float> dist_vel(0.0f, 12.0f);
int index = dist_idx(rng);
float x = index % grid_size;
float y = index / grid_size;
particle_array[i].weight = 1.0 / static_cast<float>(particle_count);
particle_array[i].state = glm::vec4(x, y, dist_vel(rng), dist_vel(rng));
//printf("w: %f, x: %f, y: %f, vx: %f, vy: %f\n", particle_array[i].weight, particle_array[i].state[0], particle_array[i].state[1],
// particle_array[i].state[2], particle_array[i].state[3]);
}
}
__global__ void initGridCellsKernel(GridCell* grid_cell_array, MeasurementCell* meas_cell_array, int grid_size, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
const int x = i % grid_size;
const int y = i / grid_size;
grid_cell_array[i].pos = make_int2(x, y);
grid_cell_array[i].free_mass = 0.0f;
grid_cell_array[i].occ_mass = 0.0f;
//grid_cell_array[i].start_idx = -1;
//grid_cell_array[i].end_idx = -1;
meas_cell_array[i].occ_mass = 0.0f;
meas_cell_array[i].free_mass = 0.0f;
meas_cell_array[i].likelihood = 1.0f;
meas_cell_array[i].p_A = 1.0f;
}
}
__global__ void reinitGridParticleIndices(GridCell* grid_cell_array, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
grid_cell_array[i].start_idx = -1;
grid_cell_array[i].end_idx = -1;
}
}
|
703ac1527ddc48f0b55a739168b9616c37851cb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <stdbool.h>
#include <time.h>
#define K 3 // K is from K-SAT, currently we are working on 3-SAT
#define THREAD_PER_BLOCK_log2 10
// current Var Limit is 32;
void preProcessing(){
// removes comment
while(getchar() == 'c'){
while(getchar()!='\n');
}
getchar();
char format[100];
scanf("%s", format);
if(strcmp(format, "cnf") != 0){ // format assertion
printf("Format Error, expected cnf but %s was provided\n", format);
exit(1);
}
printf("Preprocessing Successfull\n");
}
int cpuSolve(int varCount, int clauseCount, int* clauseStore){
int limit = pow(2, varCount);
int satCount = 0;
for(int perIndex=0; perIndex<limit; perIndex++){
bool result = true;
for(int i=0; i<clauseCount; i++){
bool clauseResult = false;
for(int j=0; j<K; j++){
int var = clauseStore[K*i + j];
int absVar = abs(var);
bool varValue;
if(var < 0)
varValue = !((perIndex >> (absVar-1))&1);
else
varValue = (perIndex >> (absVar-1))&1;
clauseResult = clauseResult || varValue;
}
result = result && clauseResult;
}
if(result)
satCount++;
// if(perIndex%10000 == 0)
// printf("completed = %d\n", perIndex);
}
return satCount;
}
__global__ void gpuSolver(int varCount, int clauseCount, int limit, int* clauseStore, int *gpu_sat_count){
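    // Each thread tests one complete truth assignment: bit (absVar - 1) of
    // perIndex encodes the value of variable absVar. Every clause is evaluated,
    // and gpu_sat_count is atomically incremented for satisfying assignments.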
bool result = true;
int perIndex = (blockIdx.x << THREAD_PER_BLOCK_log2) + threadIdx.x;
if(perIndex >= limit)
return;
for(int i=0; i<clauseCount; i++){
bool clauseResult = false;
for(int j=0; j<K; j++){
int var = clauseStore[K*i + j];
int absVar = abs(var);
bool varValue;
if(var < 0)
varValue = !((perIndex >> (absVar-1))&1);
else
varValue = (perIndex >> (absVar-1))&1;
clauseResult = clauseResult || varValue;
}
result = result && clauseResult;
}
if(result)
atomicAdd(gpu_sat_count, 1);
}
int main(int argc, char* argv[]){
if(argc<2){
printf("Invalid Options: One options is required to indetity type of execution\n");
return 1;
}
preProcessing();
int varCount, clauseCount;
scanf("%d%d", &varCount, &clauseCount);
printf("\nNo. of Variables = %d | No. of clauses = %d\n", varCount, clauseCount);
// clauses Input
int *clauseStore = (int*)malloc(sizeof(int)*clauseCount*K);
for(int i=0; i<clauseCount; i++){
for(int j=0; j<K; j++){ // one clause with K variables
scanf("%d", clauseStore + (K * i) + j);
}
int tmp;
scanf("%d\n", &tmp);
}
clock_t start, end;
/* for(int i=0; i<clauseCount; i++){
for(int j=0; j<K; j++){
printf("%d ", clauseStore[K*i + j]);
}
printf("\n");
} */
if(strcmp(argv[1], "cpu")==0){ // cpu implementations
start = clock();
int satCount = cpuSolve(varCount, clauseCount, clauseStore);
end = clock();
printf("\n\nSAT Count = %d\n", satCount);
}
else if(strcmp(argv[1], "gpu") ==0){ // gpu implementations
int *gpuClauseStore;
hipMalloc(&gpuClauseStore, sizeof(int)*clauseCount*K);
hipMemcpy(gpuClauseStore, clauseStore, sizeof(int)*clauseCount*K, hipMemcpyHostToDevice);
int *gpu_sat_count;
hipMalloc(&gpu_sat_count, sizeof(int));
hipMemset(gpu_sat_count, 0, sizeof(int));
hipDeviceSynchronize();
int limit = pow(2, varCount);
int threadPerBlock = pow(2, THREAD_PER_BLOCK_log2);
int noOfBlock = ceil((float)limit / threadPerBlock);
start = clock();
hipLaunchKernelGGL(( gpuSolver), dim3(noOfBlock), dim3(threadPerBlock), 0, 0, varCount, clauseCount, limit, gpuClauseStore, gpu_sat_count);
hipDeviceSynchronize();
end = clock();
int *satCount= (int*)malloc(sizeof(int));
hipMemcpy(satCount, gpu_sat_count, sizeof(int), hipMemcpyDeviceToHost);
printf("\n\nSAT Count = %d\n", *satCount);
}
else{
printf("Invalid Option");
return 0;
}
double executionTime = (double)(end-start)/CLOCKS_PER_SEC;
printf("execution Time = %lf\n", executionTime);
return 0;
}
| 703ac1527ddc48f0b55a739168b9616c37851cb8.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <stdbool.h>
#include <time.h>
#define K 3 // K is from K-SAT, currently we are working on 3-SAT
#define THREAD_PER_BLOCK_log2 10
// current Var Limit is 32;
void preProcessing(){
// removes comment
while(getchar() == 'c'){
while(getchar()!='\n');
}
getchar();
char format[100];
scanf("%s", format);
if(strcmp(format, "cnf") != 0){ // format assertion
printf("Format Error, expected cnf but %s was provided\n", format);
exit(1);
}
printf("Preprocessing Successfull\n");
}
int cpuSolve(int varCount, int clauseCount, int* clauseStore){
int limit = pow(2, varCount);
int satCount = 0;
for(int perIndex=0; perIndex<limit; perIndex++){
bool result = true;
for(int i=0; i<clauseCount; i++){
bool clauseResult = false;
for(int j=0; j<K; j++){
int var = clauseStore[K*i + j];
int absVar = abs(var);
bool varValue;
if(var < 0)
varValue = !((perIndex >> (absVar-1))&1);
else
varValue = (perIndex >> (absVar-1))&1;
clauseResult = clauseResult || varValue;
}
result = result && clauseResult;
}
if(result)
satCount++;
// if(perIndex%10000 == 0)
// printf("completed = %d\n", perIndex);
}
return satCount;
}
__global__ void gpuSolver(int varCount, int clauseCount, int limit, int* clauseStore, int *gpu_sat_count){
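    // Each thread tests one complete truth assignment: bit (absVar - 1) of
    // perIndex encodes the value of variable absVar. Every clause is evaluated,
    // and gpu_sat_count is atomically incremented for satisfying assignments.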
bool result = true;
int perIndex = (blockIdx.x << THREAD_PER_BLOCK_log2) + threadIdx.x;
if(perIndex >= limit)
return;
for(int i=0; i<clauseCount; i++){
bool clauseResult = false;
for(int j=0; j<K; j++){
int var = clauseStore[K*i + j];
int absVar = abs(var);
bool varValue;
if(var < 0)
varValue = !((perIndex >> (absVar-1))&1);
else
varValue = (perIndex >> (absVar-1))&1;
clauseResult = clauseResult || varValue;
}
result = result && clauseResult;
}
if(result)
atomicAdd(gpu_sat_count, 1);
}
int main(int argc, char* argv[]){
if(argc<2){
printf("Invalid Options: One options is required to indetity type of execution\n");
return 1;
}
preProcessing();
int varCount, clauseCount;
scanf("%d%d", &varCount, &clauseCount);
printf("\nNo. of Variables = %d | No. of clauses = %d\n", varCount, clauseCount);
// clauses Input
int *clauseStore = (int*)malloc(sizeof(int)*clauseCount*K);
for(int i=0; i<clauseCount; i++){
for(int j=0; j<K; j++){ // one clause with K variables
scanf("%d", clauseStore + (K * i) + j);
}
int tmp;
scanf("%d\n", &tmp);
}
clock_t start, end;
/* for(int i=0; i<clauseCount; i++){
for(int j=0; j<K; j++){
printf("%d ", clauseStore[K*i + j]);
}
printf("\n");
} */
if(strcmp(argv[1], "cpu")==0){ // cpu implementations
start = clock();
int satCount = cpuSolve(varCount, clauseCount, clauseStore);
end = clock();
printf("\n\nSAT Count = %d\n", satCount);
}
else if(strcmp(argv[1], "gpu") ==0){ // gpu implementations
int *gpuClauseStore;
cudaMalloc(&gpuClauseStore, sizeof(int)*clauseCount*K);
cudaMemcpy(gpuClauseStore, clauseStore, sizeof(int)*clauseCount*K, cudaMemcpyHostToDevice);
int *gpu_sat_count;
cudaMalloc(&gpu_sat_count, sizeof(int));
cudaMemset(gpu_sat_count, 0, sizeof(int));
cudaDeviceSynchronize();
int limit = pow(2, varCount);
int threadPerBlock = pow(2, THREAD_PER_BLOCK_log2);
int noOfBlock = ceil((float)limit / threadPerBlock);
start = clock();
gpuSolver<<<noOfBlock, threadPerBlock>>>(varCount, clauseCount, limit, gpuClauseStore, gpu_sat_count);
cudaDeviceSynchronize();
end = clock();
int *satCount= (int*)malloc(sizeof(int));
cudaMemcpy(satCount, gpu_sat_count, sizeof(int), cudaMemcpyDeviceToHost);
printf("\n\nSAT Count = %d\n", *satCount);
}
else{
printf("Invalid Option");
return 0;
}
double executionTime = (double)(end-start)/CLOCKS_PER_SEC;
printf("execution Time = %lf\n", executionTime);
return 0;
}
|
f93ebaeb08f681b15b31e6550590b05e66e26b7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include "kernel.h"
size_t width;
size_t height;
float* cudaInput;
float* cudaOutputX;
float* cudaOutputY;
float* cudaOutputAux;
float* cudaOutputAux2;
float* cudaOutputAux3;
float* gaussianKernelCuda;
float* cudaSobelX;
float* cudaSobelY;
float* cudaA_X_X;
float* cudaA_X_Y;
float* cudaA_Y_Y;
float* cuda_R;
int* cudaFeatures;
__global__ void gaussianBlurKernel(const float* const __restrict__ input,
float* const __restrict__ output,
const size_t width,
const size_t height,
const float* const __restrict__ gaussianKernel)
{
//x and y maxs are width and height
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float inputs[9];
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
inputs[0] = input[(y - 1) * width + (x - 1)];
inputs[1] = input[(y - 1) * width + x];
inputs[2] = input[(y - 1) * width + (x + 1)];
inputs[3] = input[y * width + (x - 1)];
inputs[4] = input[y * width + x];
inputs[5] = input[y * width + (x + 1)];
inputs[6] = input[(y + 1) * width + (x - 1)];
inputs[7] = input[(y + 1) * width + x];
inputs[8] = input[(y + 1) * width + (x + 1)];
float tempValue = 0;
for (unsigned int it = 0; it < 9; ++it)
tempValue += inputs[it] * gaussianKernel[it];
output[y * width + x] = tempValue;
}
};
__global__ void sobelKernel(const float* const __restrict__ input,
float* const __restrict__ outputX,
float* const __restrict__ outputY,
const size_t width,
const size_t height,
const float* const __restrict__ sobelKernelX,
const float* const __restrict__ sobelKernelY)
{
//x and y maxs are width and height
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float inputs[9];
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
inputs[0] = input[(y - 1) * width + (x - 1)];
inputs[1] = input[(y - 1) * width + x];
inputs[2] = input[(y - 1) * width + (x + 1)];
inputs[3] = input[y * width + (x - 1)];
inputs[4] = input[y * width + x];
inputs[5] = input[y * width + (x + 1)];
inputs[6] = input[(y + 1) * width + (x - 1)];
inputs[7] = input[(y + 1) * width + x];
inputs[8] = input[(y + 1) * width + (x + 1)];
float tempValueX = 0.0f;
float tempValueY = 0.0f;
for (unsigned int it = 0; it < 9; ++it)
{
tempValueX += inputs[it] * sobelKernelX[it];
tempValueY += inputs[it] * sobelKernelY[it];
}
outputX[y * width + x] = tempValueX;
outputY[y * width + x] = tempValueY;
}
};
__global__ void cwiseProduct(const float* const matrix1,
const float* const matrix2,
float* const output,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
output[y * width + x] = matrix1[y * width + x] * matrix2[y * width + x];
}
};
__global__ void calculate_k_product(const float * const __restrict__ matrix1,
const float * const __restrict__ matrix2,
const float k,
float * const __restrict__ output,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
float aux = matrix1[y * width + x] + matrix2[y * width + x];
output[y * width + x] = k * aux * aux;
}
};
__global__ void calculate_diff(float * const __restrict__ matrix1,
const float * const __restrict__ matrix2,
const float * const __restrict__ matrix3,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
matrix1[y * width + x] = matrix1[y * width + x] - matrix2[y * width + x] - matrix3[y * width + x];
}
};
__global__ void threshold_cuda(float * const R,
const float threshold,
const size_t width,
const size_t height)
{
// THRESH_TOZERO
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
if(R[y * width + x] < threshold)
{
R[y * width + x] = 0.0f;
}
}
};
/*
Stores a 1 in features if the pixel is a local maximum, 0 otherwise.
Useful for plotting later on.
*/
__global__ void nonMaximaSupression_cuda(const float * const __restrict__ input,
int * const __restrict__ features,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  float neighbours[8]; // all neighbours except the pixel itself
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
neighbours[0] = input[(y - 1) * width + (x - 1)];
neighbours[1] = input[(y - 1) * width + x];
neighbours[2] = input[(y - 1) * width + (x + 1)];
neighbours[3] = input[y * width + (x - 1)];
neighbours[4] = input[y * width + (x + 1)];
neighbours[5] = input[(y + 1) * width + (x - 1)];
neighbours[6] = input[(y + 1) * width + x];
neighbours[7] = input[(y + 1) * width + (x + 1)];
int is_max = 1;
for (unsigned int it = 0; it < 8 && is_max; ++it)
is_max = neighbours[it] < input[y * width + x];
features[y * width + x] = is_max;
}
};
__global__ void normalize_R(float * const __restrict__ R,
const float max,
const float min,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
R[y * width + x] = R[y * width + x] * (1.0 / (max - min)) - min / (max - min);
}
};
void harrisCornersFilter(const float* const image,
const size_t imageWidth,
const size_t imageHeight,
const float* const gaussianKernel,
float* output,
int* features)
{
  // Memory initialization
width = imageWidth;
height = imageHeight;
hipMalloc(reinterpret_cast<void**>(&cudaInput), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&gaussianKernelCuda), 9 * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaSobelX), 9 * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaSobelY), 9 * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaOutputX), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaOutputY), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaOutputAux), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaOutputAux2), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaOutputAux3), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaA_X_X), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaA_X_Y), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaA_Y_Y), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cuda_R), width * height * sizeof(float));
hipMalloc(reinterpret_cast<void**>(&cudaFeatures), width * height * sizeof(int));
float sobelKernelX[] = {-1.0f, 0.0f, 1.0f,
-2.0f, 0.0f, 2.0f,
-1.0f, 0.0f, 1.0f
};
float sobelKernelY[] = {-1.0f, -2.0f, -1.0f,
0.0f, 0.0f, 0.0f,
1.0f, 2.0f, 1.0f
};
hipMemcpy(cudaInput, image, width * height * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gaussianKernelCuda, gaussianKernel, 9 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cudaSobelX, sobelKernelX, 9 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cudaSobelY, sobelKernelY, 9 * sizeof(float), hipMemcpyHostToDevice);
  // Start of the computation
gradientSobelCuda();
hipDeviceSynchronize();
calculateA();
gaussianBlurCuda(cudaA_X_X,cudaOutputAux);
hipDeviceSynchronize();
hipMemcpy(cudaA_X_X, cudaOutputAux, width * height * sizeof(float), hipMemcpyDeviceToDevice);
gaussianBlurCuda(cudaA_X_Y,cudaOutputAux);
hipDeviceSynchronize();
hipMemcpy(cudaA_X_Y, cudaOutputAux, width * height * sizeof(float), hipMemcpyDeviceToDevice);
gaussianBlurCuda(cudaA_Y_Y,cudaOutputAux);
hipDeviceSynchronize();
hipMemcpy(cudaA_Y_Y, cudaOutputAux, width * height * sizeof(float), hipMemcpyDeviceToDevice);
calculateR();
hipDeviceSynchronize();
threshold();
hipDeviceSynchronize();
  // Inside this call, leave every pixel of the image in the range [0, 1];
nonMaximaSupression();
hipDeviceSynchronize();
  // copy the result back
hipMemcpy(output, cudaOutputAux, width * height * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(features, cudaFeatures, width * height * sizeof(int), hipMemcpyDeviceToHost);
  // free memory
hipFree(cudaInput);
hipFree(cudaOutputX);
hipFree(cudaOutputY);
hipFree(cudaOutputAux);
hipFree(cudaOutputAux3);
hipFree(cudaOutputAux2);
hipFree(gaussianKernelCuda);
hipFree(cudaSobelX);
hipFree(cudaSobelY);
hipFree(cudaA_X_X);
hipFree(cudaA_X_Y);
hipFree(cudaA_Y_Y);
hipFree(cudaFeatures);
}
void gaussianBlurCuda(const float* const input,
float* const output)
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
hipLaunchKernelGGL(( gaussianBlurKernel), dim3(gridSize), dim3(blockSize), 0, 0, input,
output,
width,
height,
gaussianKernelCuda);
}
void gradientSobelCuda()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
hipLaunchKernelGGL(( sobelKernel), dim3(gridSize), dim3(blockSize), 0, 0, cudaInput,
cudaOutputX,
cudaOutputY,
width,
height,
cudaSobelX,
cudaSobelY);
}
void calculateA()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
//cudaA_X_X = cudaOutputX * cudaOutputX;
hipLaunchKernelGGL(( cwiseProduct), dim3(gridSize), dim3(blockSize), 0, 0, cudaOutputX,
cudaOutputX,
cudaA_X_X,
width,
height);
//cudaA_X_Y = cudaOutputX * cudaOutputY;
hipLaunchKernelGGL(( cwiseProduct), dim3(gridSize), dim3(blockSize), 0, 0, cudaOutputX,
cudaOutputY,
cudaA_X_Y,
width,
height);
//cudaA_Y_Y = cudaOutputY * cudaOutputY;
hipLaunchKernelGGL(( cwiseProduct), dim3(gridSize), dim3(blockSize), 0, 0, cudaOutputY,
cudaOutputY,
cudaA_Y_Y,
width,
height);
}
void calculateR()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
const float k = 0.04f;
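  // Harris corner response R = det(M) - k * trace(M)^2 of the structure tensor M,
  // built element-wise: det(M) = Ixx*Iyy - Ixy^2 and trace(M) = Ixx + Iyy.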
hipLaunchKernelGGL(( cwiseProduct), dim3(gridSize), dim3(blockSize), 0, 0, cudaA_X_X,
cudaA_Y_Y,
cudaOutputAux,
width,
height);
hipLaunchKernelGGL(( cwiseProduct), dim3(gridSize), dim3(blockSize), 0, 0, cudaA_X_Y,
cudaA_X_Y,
cudaOutputAux2,
width,
height);
hipLaunchKernelGGL(( calculate_k_product), dim3(gridSize), dim3(blockSize), 0, 0, cudaA_X_X,
cudaA_Y_Y,
k,
cudaOutputAux3,
width,
height);
hipLaunchKernelGGL(( calculate_diff), dim3(gridSize), dim3(blockSize), 0, 0, cudaOutputAux,
cudaOutputAux2,
cudaOutputAux3,
width,
height);
}
void threshold()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
hipLaunchKernelGGL(( threshold_cuda), dim3(gridSize), dim3(blockSize), 0, 0, cudaOutputAux,
THRESHOLD,
width,
height);
}
void nonMaximaSupression()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
hipLaunchKernelGGL(( nonMaximaSupression_cuda), dim3(gridSize), dim3(blockSize), 0, 0, cudaOutputAux,
cudaFeatures,
width,
height);
thrust::device_ptr<float> img = thrust::device_pointer_cast(cudaOutputAux);
thrust::device_vector<float>::iterator max_elem = thrust::max_element(img, img + width * height);
thrust::device_vector<float>::iterator min_elem = thrust::min_element(img, img + width * height);
const float max = *max_elem;
const float min = *min_elem;
  // normalize_R expects (R, max, min, ...), so pass the maximum before the minimum.
  hipLaunchKernelGGL(( normalize_R), dim3(gridSize), dim3(blockSize), 0, 0, cudaOutputAux,
                                       max,
                                       min,
                                       width,
                                       height);
}
| f93ebaeb08f681b15b31e6550590b05e66e26b7b.cu | #include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include "kernel.h"
size_t width;
size_t height;
float* cudaInput;
float* cudaOutputX;
float* cudaOutputY;
float* cudaOutputAux;
float* cudaOutputAux2;
float* cudaOutputAux3;
float* gaussianKernelCuda;
float* cudaSobelX;
float* cudaSobelY;
float* cudaA_X_X;
float* cudaA_X_Y;
float* cudaA_Y_Y;
float* cuda_R;
int* cudaFeatures;
__global__ void gaussianBlurKernel(const float* const __restrict__ input,
float* const __restrict__ output,
const size_t width,
const size_t height,
const float* const __restrict__ gaussianKernel)
{
//x and y maxs are width and height
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float inputs[9];
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
inputs[0] = input[(y - 1) * width + (x - 1)];
inputs[1] = input[(y - 1) * width + x];
inputs[2] = input[(y - 1) * width + (x + 1)];
inputs[3] = input[y * width + (x - 1)];
inputs[4] = input[y * width + x];
inputs[5] = input[y * width + (x + 1)];
inputs[6] = input[(y + 1) * width + (x - 1)];
inputs[7] = input[(y + 1) * width + x];
inputs[8] = input[(y + 1) * width + (x + 1)];
float tempValue = 0;
for (unsigned int it = 0; it < 9; ++it)
tempValue += inputs[it] * gaussianKernel[it];
output[y * width + x] = tempValue;
}
};
__global__ void sobelKernel(const float* const __restrict__ input,
float* const __restrict__ outputX,
float* const __restrict__ outputY,
const size_t width,
const size_t height,
const float* const __restrict__ sobelKernelX,
const float* const __restrict__ sobelKernelY)
{
//x and y maxs are width and height
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float inputs[9];
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
inputs[0] = input[(y - 1) * width + (x - 1)];
inputs[1] = input[(y - 1) * width + x];
inputs[2] = input[(y - 1) * width + (x + 1)];
inputs[3] = input[y * width + (x - 1)];
inputs[4] = input[y * width + x];
inputs[5] = input[y * width + (x + 1)];
inputs[6] = input[(y + 1) * width + (x - 1)];
inputs[7] = input[(y + 1) * width + x];
inputs[8] = input[(y + 1) * width + (x + 1)];
float tempValueX = 0.0f;
float tempValueY = 0.0f;
for (unsigned int it = 0; it < 9; ++it)
{
tempValueX += inputs[it] * sobelKernelX[it];
tempValueY += inputs[it] * sobelKernelY[it];
}
outputX[y * width + x] = tempValueX;
outputY[y * width + x] = tempValueY;
}
};
__global__ void cwiseProduct(const float* const matrix1,
const float* const matrix2,
float* const output,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
output[y * width + x] = matrix1[y * width + x] * matrix2[y * width + x];
}
};
__global__ void calculate_k_product(const float * const __restrict__ matrix1,
const float * const __restrict__ matrix2,
const float k,
float * const __restrict__ output,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
float aux = matrix1[y * width + x] + matrix2[y * width + x];
output[y * width + x] = k * aux * aux;
}
};
__global__ void calculate_diff(float * const __restrict__ matrix1,
const float * const __restrict__ matrix2,
const float * const __restrict__ matrix3,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
matrix1[y * width + x] = matrix1[y * width + x] - matrix2[y * width + x] - matrix3[y * width + x];
}
};
__global__ void threshold_cuda(float * const R,
const float threshold,
const size_t width,
const size_t height)
{
// THRESH_TOZERO
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
if(R[y * width + x] < threshold)
{
R[y * width + x] = 0.0f;
}
}
};
/*
Stores a 1 in features if the pixel is a local maximum, 0 otherwise.
Useful for plotting later on.
*/
__global__ void nonMaximaSupression_cuda(const float * const __restrict__ input,
int * const __restrict__ features,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  float neighbours[8]; // all neighbours except the pixel itself
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
neighbours[0] = input[(y - 1) * width + (x - 1)];
neighbours[1] = input[(y - 1) * width + x];
neighbours[2] = input[(y - 1) * width + (x + 1)];
neighbours[3] = input[y * width + (x - 1)];
neighbours[4] = input[y * width + (x + 1)];
neighbours[5] = input[(y + 1) * width + (x - 1)];
neighbours[6] = input[(y + 1) * width + x];
neighbours[7] = input[(y + 1) * width + (x + 1)];
int is_max = 1;
for (unsigned int it = 0; it < 8 && is_max; ++it)
is_max = neighbours[it] < input[y * width + x];
features[y * width + x] = is_max;
}
};
__global__ void normalize_R(float * const __restrict__ R,
const float max,
const float min,
const size_t width,
const size_t height)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if((x > 0) && (x < (height - 1)) && (y > 0) && (y < (width - 1)))
{
R[y * width + x] = R[y * width + x] * (1.0 / (max - min)) - min / (max - min);
}
};
void harrisCornersFilter(const float* const image,
const size_t imageWidth,
const size_t imageHeight,
const float* const gaussianKernel,
float* output,
int* features)
{
  // Memory initialization
width = imageWidth;
height = imageHeight;
cudaMalloc(reinterpret_cast<void**>(&cudaInput), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&gaussianKernelCuda), 9 * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaSobelX), 9 * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaSobelY), 9 * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaOutputX), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaOutputY), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaOutputAux), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaOutputAux2), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaOutputAux3), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaA_X_X), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaA_X_Y), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaA_Y_Y), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cuda_R), width * height * sizeof(float));
cudaMalloc(reinterpret_cast<void**>(&cudaFeatures), width * height * sizeof(int));
float sobelKernelX[] = {-1.0f, 0.0f, 1.0f,
-2.0f, 0.0f, 2.0f,
-1.0f, 0.0f, 1.0f
};
float sobelKernelY[] = {-1.0f, -2.0f, -1.0f,
0.0f, 0.0f, 0.0f,
1.0f, 2.0f, 1.0f
};
cudaMemcpy(cudaInput, image, width * height * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gaussianKernelCuda, gaussianKernel, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cudaSobelX, sobelKernelX, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cudaSobelY, sobelKernelY, 9 * sizeof(float), cudaMemcpyHostToDevice);
  // Start of the computation
gradientSobelCuda();
cudaDeviceSynchronize();
calculateA();
gaussianBlurCuda(cudaA_X_X,cudaOutputAux);
cudaDeviceSynchronize();
cudaMemcpy(cudaA_X_X, cudaOutputAux, width * height * sizeof(float), cudaMemcpyDeviceToDevice);
gaussianBlurCuda(cudaA_X_Y,cudaOutputAux);
cudaDeviceSynchronize();
cudaMemcpy(cudaA_X_Y, cudaOutputAux, width * height * sizeof(float), cudaMemcpyDeviceToDevice);
gaussianBlurCuda(cudaA_Y_Y,cudaOutputAux);
cudaDeviceSynchronize();
cudaMemcpy(cudaA_Y_Y, cudaOutputAux, width * height * sizeof(float), cudaMemcpyDeviceToDevice);
calculateR();
cudaDeviceSynchronize();
threshold();
cudaDeviceSynchronize();
  // Inside this call, leave every pixel of the image in the range [0, 1];
nonMaximaSupression();
cudaDeviceSynchronize();
  // copy the result back
cudaMemcpy(output, cudaOutputAux, width * height * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(features, cudaFeatures, width * height * sizeof(int), cudaMemcpyDeviceToHost);
  // free memory
cudaFree(cudaInput);
cudaFree(cudaOutputX);
cudaFree(cudaOutputY);
cudaFree(cudaOutputAux);
cudaFree(cudaOutputAux3);
cudaFree(cudaOutputAux2);
cudaFree(gaussianKernelCuda);
cudaFree(cudaSobelX);
cudaFree(cudaSobelY);
cudaFree(cudaA_X_X);
cudaFree(cudaA_X_Y);
cudaFree(cudaA_Y_Y);
cudaFree(cudaFeatures);
}
void gaussianBlurCuda(const float* const input,
float* const output)
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
gaussianBlurKernel<<<gridSize, blockSize>>>(input,
output,
width,
height,
gaussianKernelCuda);
}
void gradientSobelCuda()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
sobelKernel<<<gridSize, blockSize>>>(cudaInput,
cudaOutputX,
cudaOutputY,
width,
height,
cudaSobelX,
cudaSobelY);
}
void calculateA()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
//cudaA_X_X = cudaOutputX * cudaOutputX;
cwiseProduct<<<gridSize, blockSize>>>(cudaOutputX,
cudaOutputX,
cudaA_X_X,
width,
height);
//cudaA_X_Y = cudaOutputX * cudaOutputY;
cwiseProduct<<<gridSize, blockSize>>>(cudaOutputX,
cudaOutputY,
cudaA_X_Y,
width,
height);
//cudaA_Y_Y = cudaOutputY * cudaOutputY;
cwiseProduct<<<gridSize, blockSize>>>(cudaOutputY,
cudaOutputY,
cudaA_Y_Y,
width,
height);
}
void calculateR()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
const float k = 0.04f;
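  // Harris corner response R = det(M) - k * trace(M)^2 of the structure tensor M,
  // built element-wise: det(M) = Ixx*Iyy - Ixy^2 and trace(M) = Ixx + Iyy.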
cwiseProduct<<<gridSize, blockSize>>>(cudaA_X_X,
cudaA_Y_Y,
cudaOutputAux,
width,
height);
cwiseProduct<<<gridSize, blockSize>>>(cudaA_X_Y,
cudaA_X_Y,
cudaOutputAux2,
width,
height);
calculate_k_product<<<gridSize, blockSize>>>(cudaA_X_X,
cudaA_Y_Y,
k,
cudaOutputAux3,
width,
height);
calculate_diff<<<gridSize, blockSize>>>(cudaOutputAux,
cudaOutputAux2,
cudaOutputAux3,
width,
height);
}
void threshold()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
threshold_cuda<<<gridSize, blockSize>>>(cudaOutputAux,
THRESHOLD,
width,
height);
}
void nonMaximaSupression()
{
dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridSize(width / BLOCK_SIZE_X, height / BLOCK_SIZE_Y);
nonMaximaSupression_cuda<<<gridSize, blockSize>>>(cudaOutputAux,
cudaFeatures,
width,
height);
thrust::device_ptr<float> img = thrust::device_pointer_cast(cudaOutputAux);
thrust::device_vector<float>::iterator max_elem = thrust::max_element(img, img + width * height);
thrust::device_vector<float>::iterator min_elem = thrust::min_element(img, img + width * height);
const float max = *max_elem;
const float min = *min_elem;
normalize_R<<<gridSize, blockSize>>>(cudaOutputAux,
min,
max,
width,
height);
}
|
8f64b6058fa8af412cce482368fdb008cf7ffba0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hipsparse.h"
#include <hip/hip_runtime.h>
#include "utils.h"
#include "time.h"
int main(int argc, char *argv[]) {
/***********************************************
* initialize program's input parameters *
***********************************************/
double alpha = 1;
double beta = 0;
double norm = 0;
h_vec_t<double> distance_1;
int num_feat_1 = atoi(argv[2]);
ReadMatrix(distance_1, argv[1], num_feat_1);
#ifdef ACCELERATE
std::cout << "CUDA" << std::endl;
d_vec_t<double> d_distance_1 = distance_1;
#endif
h_vec_t<double> distance_2;
int num_feat_2 = atoi(argv[4]);
ReadMatrix(distance_2, argv[3], num_feat_2);
#ifdef ACCELERATE
d_vec_t<double> d_distance_2 = distance_2;
#endif
h_vec_t<double> distance_3;
int num_feat_3 = atoi(argv[6]);
ReadMatrix(distance_3, argv[5], num_feat_3);
#ifdef ACCELERATE
d_vec_t<double> d_distance_3 = distance_3;
#endif
int match_len = atoi(argv[8]);
h_vec_t<int> matched_feat_1(match_len);
h_vec_t<int> matched_feat_2(match_len);
h_vec_t<int> matched_feat_3(match_len);
ReadMatchedFeatures(matched_feat_1, matched_feat_2, matched_feat_3, argv[7],
match_len);
#ifdef ACCELERATE
d_vec_t<int> d_matched_feat_1 = matched_feat_1;
d_vec_t<int> d_matched_feat_2 = matched_feat_2;
d_vec_t<int> d_matched_feat_3 = matched_feat_3;
#endif
int num_iters = 20;
if (10 == argc)
num_iters = atoi(argv[9]);
/**************************************************
* construct affinity matrix *
***************************************************/
double *distance1 = raw_pointer_cast(distance_1.data());
double *distance2 = raw_pointer_cast(distance_2.data());
double *distance3 = raw_pointer_cast(distance_3.data());
int *h_matched_1 = raw_pointer_cast(matched_feat_1.data());
int *h_matched_2 = raw_pointer_cast(matched_feat_2.data());
int *h_matched_3 = raw_pointer_cast(matched_feat_3.data());
double *affinity = new double[match_len * match_len];
affinity =
AffinityInitialMatches(distance1, distance2, distance3, num_feat_1, num_feat_2, num_feat_3, h_matched_1, h_matched_2, h_matched_3, match_len);
#ifdef ACCELERATE
d_vec_t<double> d_affinity(affinity, affinity + match_len * match_len);
#else
h_vec_t<double> h_affinity(affinity, affinity + match_len * match_len);
#endif
/************************************************
* convert full matrix to CSR matrix *
************************************************/
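  // CSR layout produced by CompressMatrix (assumed): `value` holds the nonzeros row by row,
  // `column` their column indices, and `row` the per-row offsets into `value`
  // (hipsparseDcsrmv below expects a row-pointer array of match_len + 1 entries).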
h_vec_t<double> value;
h_vec_t<int> column;
h_vec_t<int> row;
const clock_t begin_time = clock();
CompressMatrix(value, column, row, affinity, match_len, match_len);
d_vec_t<double> d_value = value;
d_vec_t<int> d_column = column;
d_vec_t<int> d_row = row;
std::cout << "affinity runtime: "
<< float(clock() - begin_time) / CLOCKS_PER_SEC * 1000 << std::endl;
//std::cout << "affinity" << std::endl;
//std::cout << "values "
// << " "
// << "columns"
// << " "
// << "rows" << std::endl;
// std::cout << d_value.size() << std::endl;
// for (int i = 0; i < value.size(); ++i) {
// std::cout << value[i] << " " << column[i] << " " << std::endl;
// }
// std::cout << std::endl;
//
// for (int i = 0; i < row.size(); ++i) {
// std::cout << row[i] << " " << std::endl;
// }
// std::cout << std::endl;
/************************************************
* initialize eigen vectors *
************************************************/
int len_eigen_vec = match_len;
d_vec_t<double> d_eigen_new(len_eigen_vec);
fill(d_eigen_new.begin(), d_eigen_new.end(), 0);
d_vec_t<double> d_eigen_old(len_eigen_vec);
norm = 1.0 / sqrt(len_eigen_vec);
fill(d_eigen_old.begin(), d_eigen_old.end(), norm);
hipsparseHandle_t handle = 0;
hipsparseMatDescr_t descr = 0;
///// create and setup matrix descriptor
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
/************************************************
* computing eigen vector *
************************************************/
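  // Power iteration: y = A * x via hipsparseDcsrmv (sparse CSR mat-vec), then L2-normalize the
  // result into d_eigen_old; after num_iters sweeps d_eigen_old approximates the dominant
  // eigenvector of the affinity matrix.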
const clock_t begin_time2 = clock();
for (int i = 0; i < num_iters; ++i) {
hipsparseDcsrmv(
handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, match_len, match_len,
d_value.size(), &alpha, descr, raw_pointer_cast(d_value.data()),
raw_pointer_cast(d_row.data()), raw_pointer_cast(d_column.data()),
raw_pointer_cast(d_eigen_old.data()), &beta,
raw_pointer_cast(d_eigen_new.data()));
double init = 0;
norm = std::sqrt(transform_reduce(d_eigen_new.begin(), d_eigen_new.end(),
square(), init, thrust::plus<double>()));
transform(d_eigen_new.begin(), d_eigen_new.end(), d_eigen_old.begin(),
division(norm));
fill(d_eigen_new.begin(), d_eigen_new.end(), 0);
}
std::cout << "Eigen runtime: "
<< float(clock() - begin_time2) / CLOCKS_PER_SEC * 1000 << std::endl;
// std::cout << "eigen values" << std::endl;
// for (int i = 0; i < d_eigen_old.size(); i++) {
// std::cout << "eigen new value = " << d_eigen_new[i] << " ";
// std::cout << "eigen old value = " << d_eigen_old[i] << std::endl;
// }
hipsparseDestroyMatDescr(descr);
descr = 0;
// destroy handle
hipsparseDestroy(handle);
handle = 0;
return (0);
}
| 8f64b6058fa8af412cce482368fdb008cf7ffba0.cu | #include "cusparse.h"
#include <cuda_runtime.h>
#include "utils.h"
#include "time.h"
int main(int argc, char *argv[]) {
/***********************************************
* initialize program's input parameters *
***********************************************/
double alpha = 1;
double beta = 0;
double norm = 0;
h_vec_t<double> distance_1;
int num_feat_1 = atoi(argv[2]);
ReadMatrix(distance_1, argv[1], num_feat_1);
#ifdef ACCELERATE
std::cout << "CUDA" << std::endl;
d_vec_t<double> d_distance_1 = distance_1;
#endif
h_vec_t<double> distance_2;
int num_feat_2 = atoi(argv[4]);
ReadMatrix(distance_2, argv[3], num_feat_2);
#ifdef ACCELERATE
d_vec_t<double> d_distance_2 = distance_2;
#endif
h_vec_t<double> distance_3;
int num_feat_3 = atoi(argv[6]);
ReadMatrix(distance_3, argv[5], num_feat_3);
#ifdef ACCELERATE
d_vec_t<double> d_distance_3 = distance_3;
#endif
int match_len = atoi(argv[8]);
h_vec_t<int> matched_feat_1(match_len);
h_vec_t<int> matched_feat_2(match_len);
h_vec_t<int> matched_feat_3(match_len);
ReadMatchedFeatures(matched_feat_1, matched_feat_2, matched_feat_3, argv[7],
match_len);
#ifdef ACCELERATE
d_vec_t<int> d_matched_feat_1 = matched_feat_1;
d_vec_t<int> d_matched_feat_2 = matched_feat_2;
d_vec_t<int> d_matched_feat_3 = matched_feat_3;
#endif
int num_iters = 20;
if (10 == argc)
num_iters = atoi(argv[9]);
/**************************************************
* construct affinity matrix *
***************************************************/
double *distance1 = raw_pointer_cast(distance_1.data());
double *distance2 = raw_pointer_cast(distance_2.data());
double *distance3 = raw_pointer_cast(distance_3.data());
int *h_matched_1 = raw_pointer_cast(matched_feat_1.data());
int *h_matched_2 = raw_pointer_cast(matched_feat_2.data());
int *h_matched_3 = raw_pointer_cast(matched_feat_3.data());
double *affinity = new double[match_len * match_len];
affinity =
AffinityInitialMatches(distance1, distance2, distance3, num_feat_1, num_feat_2, num_feat_3, h_matched_1, h_matched_2, h_matched_3, match_len);
#ifdef ACCELERATE
d_vec_t<double> d_affinity(affinity, affinity + match_len * match_len);
#else
h_vec_t<double> h_affinity(affinity, affinity + match_len * match_len);
#endif
/************************************************
* convert full matrix to CSR matrix *
************************************************/
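  // CSR layout produced by CompressMatrix (assumed): `value` holds the nonzeros row by row,
  // `column` their column indices, and `row` the per-row offsets into `value`
  // (cusparseDcsrmv below expects a row-pointer array of match_len + 1 entries).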
h_vec_t<double> value;
h_vec_t<int> column;
h_vec_t<int> row;
const clock_t begin_time = clock();
CompressMatrix(value, column, row, affinity, match_len, match_len);
d_vec_t<double> d_value = value;
d_vec_t<int> d_column = column;
d_vec_t<int> d_row = row;
std::cout << "affinity runtime: "
<< float(clock() - begin_time) / CLOCKS_PER_SEC * 1000 << std::endl;
//std::cout << "affinity" << std::endl;
//std::cout << "values "
// << " "
// << "columns"
// << " "
// << "rows" << std::endl;
// std::cout << d_value.size() << std::endl;
// for (int i = 0; i < value.size(); ++i) {
// std::cout << value[i] << " " << column[i] << " " << std::endl;
// }
// std::cout << std::endl;
//
// for (int i = 0; i < row.size(); ++i) {
// std::cout << row[i] << " " << std::endl;
// }
// std::cout << std::endl;
/************************************************
* initialize eigen vectors *
************************************************/
int len_eigen_vec = match_len;
d_vec_t<double> d_eigen_new(len_eigen_vec);
fill(d_eigen_new.begin(), d_eigen_new.end(), 0);
d_vec_t<double> d_eigen_old(len_eigen_vec);
norm = 1.0 / sqrt(len_eigen_vec);
fill(d_eigen_old.begin(), d_eigen_old.end(), norm);
cusparseHandle_t handle = 0;
cusparseMatDescr_t descr = 0;
///// create and setup matrix descriptor
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
/************************************************
* computing eigen vector *
************************************************/
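  // Power iteration: y = A * x via cusparseDcsrmv (sparse CSR mat-vec), then L2-normalize the
  // result into d_eigen_old; after num_iters sweeps d_eigen_old approximates the dominant
  // eigenvector of the affinity matrix.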
const clock_t begin_time2 = clock();
for (int i = 0; i < num_iters; ++i) {
cusparseDcsrmv(
handle, CUSPARSE_OPERATION_NON_TRANSPOSE, match_len, match_len,
d_value.size(), &alpha, descr, raw_pointer_cast(d_value.data()),
raw_pointer_cast(d_row.data()), raw_pointer_cast(d_column.data()),
raw_pointer_cast(d_eigen_old.data()), &beta,
raw_pointer_cast(d_eigen_new.data()));
double init = 0;
norm = std::sqrt(transform_reduce(d_eigen_new.begin(), d_eigen_new.end(),
square(), init, thrust::plus<double>()));
transform(d_eigen_new.begin(), d_eigen_new.end(), d_eigen_old.begin(),
division(norm));
fill(d_eigen_new.begin(), d_eigen_new.end(), 0);
}
std::cout << "Eigen runtime: "
<< float(clock() - begin_time2) / CLOCKS_PER_SEC * 1000 << std::endl;
// std::cout << "eigen values" << std::endl;
// for (int i = 0; i < d_eigen_old.size(); i++) {
// std::cout << "eigen new value = " << d_eigen_new[i] << " ";
// std::cout << "eigen old value = " << d_eigen_old[i] << std::endl;
// }
cusparseDestroyMatDescr(descr);
descr = 0;
// destroy handle
cusparseDestroy(handle);
handle = 0;
return (0);
}
|
7a8809858ffcdeb2b05a57c4528f876254ddb1d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <hip/hip_runtime.h>
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>
#include "Graph.hpp"
#define CUDA_CALL(x) do { hipError_t error = x; if(error!=hipSuccess) { \
printf("Cuda Error at %s:%d -- ",__FILE__,__LINE__); \
printf("%s\nAbort.\n",hipGetErrorString(error)); \
exit(0); \
}} while(0)
__global__ void INITIALIZE
(
uint32_t * const d_distances,
bool * const d_frontier,
const uint32_t source,
const uint32_t n_vertex
)
{
const uint32_t node = blockDim.x * blockIdx.x + threadIdx.x;
if(node<n_vertex)
{
d_frontier[node] = false;
d_distances[node] = 0;
if(node == source)
{
d_frontier[node] = true;
}
}
}
__global__ void GET_DISTANCES
(
const uint32_t * const d_adjacency_offsets,
const uint32_t * const d_adjacency_list,
uint32_t * const d_distances,
bool * const d_frontier,
const int round,
const uint32_t n_vertex,
const uint32_t n_edges
)
{
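    // One frontier-expansion step: every frontier node removes itself from the frontier, then marks
    // each neighbour (adjacency_list[adjacency_offsets[node] .. adjacency_offsets[node+1])) by
    // setting bit `round` in its distance word and adding it to the next frontier. Distances thus
    // encode a bitmask of the rounds at which a node was reached, not a single hop count.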
//uint32_t max_threads = blockDim.x * gridDim.x;
uint32_t node = blockDim.x * blockIdx.x + threadIdx.x;
if(node<n_vertex && d_frontier[node])
{
d_frontier[node] = 0;
int offset = d_adjacency_offsets[node];
while(offset < d_adjacency_offsets[node+1])
{
int adj_node = d_adjacency_list[offset];
d_distances[adj_node] |= (1 << (round));
d_frontier[adj_node] = 1;
++offset;
}
}
}
void Graph::get_all_distances(const uint32_t MAX_ROUNDS)
{
    // Prepare GPU device pointers:
//Graph
uint32_t *d_adjacency_offsets= NULL;
uint32_t *d_adjacency_list= NULL;
// Algorithm
bool *d_frontier = NULL;
// Results:
uint32_t *d_distances= NULL;
distances = (uint32_t **)malloc(n_vertex * sizeof(uint32_t *));
CUDA_CALL(hipMalloc((void **)&d_adjacency_offsets, n_vertex*sizeof(uint32_t)));
CUDA_CALL(hipMalloc((void **)&d_adjacency_list, n_edges*sizeof(uint32_t)));
CUDA_CALL(hipMalloc((void **)&d_frontier, n_vertex*sizeof(bool)));
CUDA_CALL(hipMalloc((void **)&d_distances, n_vertex*sizeof(uint32_t)));
// Copy graph into GPU:
CUDA_CALL(hipMemcpy(d_adjacency_offsets, adjacency_offsets, n_vertex*sizeof(uint32_t), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_adjacency_list, adjacency_list, n_edges*sizeof(uint32_t), hipMemcpyHostToDevice));
dim3 BLOCK(WARP_SIZE);
dim3 GRID((n_vertex+BLOCK.x-1)/BLOCK.x);
int source_counter = 0;
do{ // Get all distances from each source node
        // Set kernel initial parameters
hipLaunchKernelGGL(( INITIALIZE), dim3(GRID),dim3(BLOCK), 0, 0,
d_distances,
d_frontier,
source_counter,
n_vertex
);
// KERNEL
int round = 0;
while(round<MAX_ROUNDS)
{
hipLaunchKernelGGL(( GET_DISTANCES), dim3(GRID),dim3(BLOCK), 0, 0,
d_adjacency_offsets,
d_adjacency_list,
d_distances,
d_frontier,
round,
n_vertex,
n_edges
);
//CUDA_CALL(hipDeviceSynchronize());
round ++;
}
// Get results
uint32_t *_distances = (uint32_t *)malloc(n_vertex*sizeof(uint32_t));
CUDA_CALL(hipMemcpy(_distances, d_distances, n_vertex*sizeof(uint32_t), hipMemcpyDeviceToHost));
*(distances+source_counter) = _distances;
++source_counter;
} while(source_counter < n_vertex);
hipFree(d_frontier);
hipFree(d_adjacency_offsets);
hipFree(d_adjacency_list);
return;
};
void Graph::get_all_distances_from_single_source(const uint32_t source, const uint32_t MAX_ROUNDS)
{
    // Prepare GPU device pointers:
//Graph
uint32_t *d_adjacency_offsets= NULL;
uint32_t *d_adjacency_list= NULL;
// Algorithm
bool *d_frontier = NULL;
// Results:
uint32_t *d_distances= NULL;
distances = (uint32_t **)malloc(n_vertex * sizeof(uint32_t *));
CUDA_CALL(hipMalloc((void **)&d_adjacency_offsets, n_vertex*sizeof(uint32_t)));
CUDA_CALL(hipMalloc((void **)&d_adjacency_list, n_edges*sizeof(uint32_t)));
CUDA_CALL(hipMalloc((void **)&d_frontier, n_vertex*sizeof(bool)));
CUDA_CALL(hipMalloc((void **)&d_distances, n_vertex*sizeof(uint32_t)));
// Copy graph into GPU:
CUDA_CALL(hipMemcpy(d_adjacency_offsets, adjacency_offsets, n_vertex*sizeof(uint32_t), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_adjacency_list, adjacency_list, n_edges*sizeof(uint32_t), hipMemcpyHostToDevice));
dim3 BLOCK(WARP_SIZE);
dim3 GRID((n_vertex+BLOCK.x-1)/BLOCK.x);
// Get all distances from each source node
    // Set kernel initial parameters
hipLaunchKernelGGL(( INITIALIZE), dim3(GRID),dim3(BLOCK), 0, 0,
d_distances,
d_frontier,
source,
n_vertex
);
// KERNEL
int round = 0;
while(round<MAX_ROUNDS)
{
hipLaunchKernelGGL(( GET_DISTANCES), dim3(GRID),dim3(BLOCK), 0, 0,
d_adjacency_offsets,
d_adjacency_list,
d_distances,
d_frontier,
round,
n_vertex,
n_edges
);
//CUDA_CALL(hipDeviceSynchronize());
round ++;
}
// Get results
uint32_t *_distances = (uint32_t *)malloc(n_vertex*sizeof(uint32_t));
CUDA_CALL(hipMemcpy(_distances, d_distances, n_vertex*sizeof(uint32_t), hipMemcpyDeviceToHost));
*(distances+source) = _distances;
hipFree(d_frontier);
hipFree(d_adjacency_offsets);
hipFree(d_adjacency_list);
return;
};
| 7a8809858ffcdeb2b05a57c4528f876254ddb1d2.cu | #include <cuda.h>
//#include <cuda_runtime.h>
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>
#include "Graph.hpp"
#define CUDA_CALL(x) do { cudaError_t error = x; if(error!=cudaSuccess) { \
printf("Cuda Error at %s:%d -- ",__FILE__,__LINE__); \
printf("%s\nAbort.\n",cudaGetErrorString(error)); \
exit(0); \
}} while(0)
__global__ void INITIALIZE
(
uint32_t * const d_distances,
bool * const d_frontier,
const uint32_t source,
const uint32_t n_vertex
)
{
const uint32_t node = blockDim.x * blockIdx.x + threadIdx.x;
if(node<n_vertex)
{
d_frontier[node] = false;
d_distances[node] = 0;
if(node == source)
{
d_frontier[node] = true;
}
}
}
__global__ void GET_DISTANCES
(
const uint32_t * const d_adjacency_offsets,
const uint32_t * const d_adjacency_list,
uint32_t * const d_distances,
bool * const d_frontier,
const int round,
const uint32_t n_vertex,
const uint32_t n_edges
)
{
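    // One frontier-expansion step: every frontier node removes itself from the frontier, then marks
    // each neighbour (adjacency_list[adjacency_offsets[node] .. adjacency_offsets[node+1])) by
    // setting bit `round` in its distance word and adding it to the next frontier. Distances thus
    // encode a bitmask of the rounds at which a node was reached, not a single hop count.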
//uint32_t max_threads = blockDim.x * gridDim.x;
uint32_t node = blockDim.x * blockIdx.x + threadIdx.x;
if(node<n_vertex && d_frontier[node])
{
d_frontier[node] = 0;
int offset = d_adjacency_offsets[node];
while(offset < d_adjacency_offsets[node+1])
{
int adj_node = d_adjacency_list[offset];
d_distances[adj_node] |= (1 << (round));
d_frontier[adj_node] = 1;
++offset;
}
}
}
void Graph::get_all_distances(const uint32_t MAX_ROUNDS)
{
    // Prepare GPU device pointers:
//Graph
uint32_t *d_adjacency_offsets= NULL;
uint32_t *d_adjacency_list= NULL;
// Algorithm
bool *d_frontier = NULL;
// Results:
uint32_t *d_distances= NULL;
distances = (uint32_t **)malloc(n_vertex * sizeof(uint32_t *));
CUDA_CALL(cudaMalloc((void **)&d_adjacency_offsets, n_vertex*sizeof(uint32_t)));
CUDA_CALL(cudaMalloc((void **)&d_adjacency_list, n_edges*sizeof(uint32_t)));
CUDA_CALL(cudaMalloc((void **)&d_frontier, n_vertex*sizeof(bool)));
CUDA_CALL(cudaMalloc((void **)&d_distances, n_vertex*sizeof(uint32_t)));
// Copy graph into GPU:
CUDA_CALL(cudaMemcpy(d_adjacency_offsets, adjacency_offsets, n_vertex*sizeof(uint32_t), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_adjacency_list, adjacency_list, n_edges*sizeof(uint32_t), cudaMemcpyHostToDevice));
dim3 BLOCK(WARP_SIZE);
dim3 GRID((n_vertex+BLOCK.x-1)/BLOCK.x);
int source_counter = 0;
do{ // Get all distances from each source node
        // Set kernel initial parameters
INITIALIZE<<<GRID,BLOCK>>>
(
d_distances,
d_frontier,
source_counter,
n_vertex
);
// KERNEL
int round = 0;
while(round<MAX_ROUNDS)
{
GET_DISTANCES<<<GRID,BLOCK>>>
(
d_adjacency_offsets,
d_adjacency_list,
d_distances,
d_frontier,
round,
n_vertex,
n_edges
);
//CUDA_CALL(cudaDeviceSynchronize());
round ++;
}
// Get results
uint32_t *_distances = (uint32_t *)malloc(n_vertex*sizeof(uint32_t));
CUDA_CALL(cudaMemcpy(_distances, d_distances, n_vertex*sizeof(uint32_t), cudaMemcpyDeviceToHost));
*(distances+source_counter) = _distances;
++source_counter;
} while(source_counter < n_vertex);
cudaFree(d_frontier);
cudaFree(d_adjacency_offsets);
cudaFree(d_adjacency_list);
return;
};
void Graph::get_all_distances_from_single_source(const uint32_t source, const uint32_t MAX_ROUNDS)
{
    // Prepare GPU device pointers:
//Graph
uint32_t *d_adjacency_offsets= NULL;
uint32_t *d_adjacency_list= NULL;
// Algorithm
bool *d_frontier = NULL;
// Results:
uint32_t *d_distances= NULL;
distances = (uint32_t **)malloc(n_vertex * sizeof(uint32_t *));
CUDA_CALL(cudaMalloc((void **)&d_adjacency_offsets, n_vertex*sizeof(uint32_t)));
CUDA_CALL(cudaMalloc((void **)&d_adjacency_list, n_edges*sizeof(uint32_t)));
CUDA_CALL(cudaMalloc((void **)&d_frontier, n_vertex*sizeof(bool)));
CUDA_CALL(cudaMalloc((void **)&d_distances, n_vertex*sizeof(uint32_t)));
// Copy graph into GPU:
CUDA_CALL(cudaMemcpy(d_adjacency_offsets, adjacency_offsets, n_vertex*sizeof(uint32_t), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_adjacency_list, adjacency_list, n_edges*sizeof(uint32_t), cudaMemcpyHostToDevice));
dim3 BLOCK(WARP_SIZE);
dim3 GRID((n_vertex+BLOCK.x-1)/BLOCK.x);
// Get all distances from each source node
    // Set kernel initial parameters
INITIALIZE<<<GRID,BLOCK>>>
(
d_distances,
d_frontier,
source,
n_vertex
);
// KERNEL
int round = 0;
while(round<MAX_ROUNDS)
{
GET_DISTANCES<<<GRID,BLOCK>>>
(
d_adjacency_offsets,
d_adjacency_list,
d_distances,
d_frontier,
round,
n_vertex,
n_edges
);
//CUDA_CALL(cudaDeviceSynchronize());
round ++;
}
// Get results
uint32_t *_distances = (uint32_t *)malloc(n_vertex*sizeof(uint32_t));
CUDA_CALL(cudaMemcpy(_distances, d_distances, n_vertex*sizeof(uint32_t), cudaMemcpyDeviceToHost));
*(distances+source) = _distances;
cudaFree(d_frontier);
cudaFree(d_adjacency_offsets);
cudaFree(d_adjacency_list);
return;
};
|
f5ff927e3b4862d63975de02aa45c8a114ec5319.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_physalis.h"
__device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z)
{
*x = r * sin(theta) * cos(phi);
*y = r * sin(theta) * sin(phi);
*z = r * cos(theta);
}
__device__ void cart2sphere(real u, real v, real w, real theta, real phi,
real *ur, real *ut, real *up)
{
real st = sin(theta);
real ct = cos(theta);
real sp = sin(phi);
real cp = cos(phi);
*ur = st * (u * cp + v * sp) + w * ct;
*ut = ct * (u * cp + v * sp) - w * st;
*up = -u * sp + v * cp;
}
__device__ real nnm(int n, int m)
{
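  // Normalization factor for the spherical harmonics:
  //   N_nm = sqrt( (2n + 1)/(4*pi) * (n - m)! / (n + m)! )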
real fact_top = 1;
real fact_bot = 1;
for (int i = 1; i <= (n - m); i++) fact_top *= (real) i;
for (int i = 1; i <= (n + m); i++) fact_bot *= (real) i;
return sqrt((2.*n + 1.) / 4. / PI * fact_top / fact_bot);
}
__device__ real pnm(int n, int m, real theta)
{
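  // Hard-coded associated Legendre polynomials P_n^m(cos(theta)) for n <= 5 and m >= 0
  // (the negative-m branches are left commented out below).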
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
//case -1: return -0.5*y;
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
//case -2: return 0.125*y*y;
//case -1: return -0.5*x*y;
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
//case -3: return -0.02083333333333*y*y*y;
//case -2: return 0.125*x*y*y;
//case -1: return -0.125*(1. - 5.*x*x)*y;
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
//case -4: return .002604166666667*y*y*y*y;
//case -3: return -0.02083333333333*x*y*y*y*y;
//case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
//case -1: return -0.125*x*(3. - 7.*x*x)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
//case -5: return -0.000260416666667*y*y*y*y*y;
//case -4: return 0.002604166666667*x*y*y*y*y;
//case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.);
//case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
//case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
__global__ void check_nodes(int nparts, part_struct *parts, BC *bc,
dom_struct *DOM)
{
int node = threadIdx.x;
int part = blockIdx.x;
/* Convert node (r, theta, phi) to (x, y, z) */
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, _node_t[node], _node_p[node], &xp, &yp, &zp);
/* shift from particle center */
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
// start off with all -1's
parts[part].nodes[node] = -1;
/* check if the node is interfered with by a wall */
// compute distance between node and walls
// set equal to some number to identify which wall is interfering
// We use <= for E,N,T and > for W,S,B -- allows us to do [start,end) on all
// subdomains regardless of bc
parts[part].nodes[node] += (WEST_WALL_D + 1) * // set equal to WEST_WALL_D...
(x - _dom.xs < 0) * // if outside domain &
(_dom.I == DOM->Is) * // if edge domain & DIRICHLET
(bc->uW == DIRICHLET || bc->vW == DIRICHLET || bc->wW == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (WEST_WALL_N + 1) *
(x - _dom.xs < 0) *
(_dom.I == DOM->Is) *
(bc->uW == NEUMANN || bc->vW == NEUMANN || bc->wW == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (EAST_WALL_D + 1) *
(x - _dom.xe >= 0) *
(_dom.I == DOM->Ie) *
(bc->uE == DIRICHLET || bc->vE == DIRICHLET || bc->wE == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (EAST_WALL_N + 1) *
(x - _dom.xe >= 0) *
(_dom.I == DOM->Ie) *
(bc->uE == NEUMANN || bc->vE == NEUMANN || bc->wE == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (SOUTH_WALL_D + 1) *
(y - _dom.ys < 0) *
(_dom.J == DOM->Js) *
(bc->uS == DIRICHLET || bc->vS == DIRICHLET || bc->wS == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (SOUTH_WALL_N + 1) *
(y - _dom.ys < 0) *
(_dom.J == DOM->Js) *
(bc->uS == NEUMANN || bc->vS == NEUMANN || bc->wS == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (NORTH_WALL_D + 1) *
(y - _dom.ye >= 0) *
(_dom.J == DOM->Je) *
(bc->uN == DIRICHLET || bc->vN == DIRICHLET || bc->wN == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (NORTH_WALL_N + 1) *
(y - _dom.ye >= 0) *
(_dom.J == DOM->Je) *
(bc->uN == NEUMANN || bc->vN == NEUMANN || bc->wN == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (BOTTOM_WALL_D + 1) *
(z - _dom.zs < 0) *
(_dom.K == DOM->Ks) *
(bc->uB == DIRICHLET || bc->vB == DIRICHLET || bc->wB == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (BOTTOM_WALL_N + 1) *
(z - _dom.zs < 0) *
(_dom.K == DOM->Ks) *
(bc->uB == NEUMANN || bc->vB == NEUMANN || bc->wB == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (TOP_WALL_D + 1) *
(z - _dom.ze >= 0) *
(_dom.K == DOM->Ke) *
(bc->uT == DIRICHLET || bc->vT == DIRICHLET || bc->wT == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (TOP_WALL_N + 1) *
(z - _dom.ze >= 0) *
(_dom.K == DOM->Ke) *
(bc->uT == NEUMANN || bc->vT == NEUMANN || bc->wT == NEUMANN)*
(parts[part].nodes[node] == -1);
}
__global__ void interpolate_nodes(real *p, real *u, real *v, real *w,
real rho_f, real nu, gradP_struct gradP, part_struct *parts, real *pp,
real *ur, real *ut, real *up, BC *bc, real s_beta, real s_ref, g_struct g)
{
int node = threadIdx.x;
int part = blockIdx.x;
real ddx = 1. / _dom.dx;
real ddy = 1. / _dom.dy;
real ddz = 1. / _dom.dz;
real irho_f = 1. / rho_f;
real inu = 1. / nu;
real ox = parts[part].ox;
real oy = parts[part].oy;
real oz = parts[part].oz;
real oxdot = parts[part].oxdot;
real oydot = parts[part].oydot;
real ozdot = parts[part].ozdot;
real udot = parts[part].udot;
real vdot = parts[part].vdot;
real wdot = parts[part].wdot;
real rs2 = parts[part].rs * parts[part].rs;
real rs3 = rs2 * parts[part].rs;
real rs5 = rs3 * rs2;
real irs3 = 1./rs3;
real a5 = parts[part].r * parts[part].r; // r^2
a5 *= a5 * parts[part].r; // r^5
real uu, vv, ww; // temporary nodes for Cartesian result of interpolation
real uuwalli, uuwallj, uuwallk;
real vvwalli, vvwallj, vvwallk;
real wwwalli, wwwallj, wwwallk;
int i, j, k; // index of cells containing node
int oobi, oobj, oobk, oob; // out of bounds indicator, 1 if out of bounds else 0
int C, Ce, Cw, Cn, Cs, Ct, Cb; // cell indices
real xx, yy, zz; // Cartesian location of p,u,v,w
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, _node_t[node], _node_p[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
/* Find index of cell containing node. */
// Do this in GLOBAL coordinates so that magnitude of floating point error is
// the same on each subdomain.
real arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gcc.isb;
real arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gcc.jsb;
real arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gcc.ksb;
/* Deal with floating point errors in position so we don't lose nodes */
// Similar to bin_fill_{i,j,k}. If floor != round and round is "close enough"
// to the nearest integer, use round instead. this ensures that all nodes are
// accounted for between subdomains
// Using global indices makes sure that the floating point representation
// error is the same for each subdomain, since the magnitude of the index will
// be the similar/the same.
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
int round_x = lrint(arg_x);
int round_y = lrint(arg_y);
int round_z = lrint(arg_z);
// Better way to do this? no if-statement... abs?
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
  // Convert back to LOCAL coordinates
i -= _dom.Gcc.isb;
j -= _dom.Gcc.jsb;
k -= _dom.Gcc.ksb;
/* Interpolate Pressure */
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
oob = i < _dom.Gcc._is || i > _dom.Gcc._ie
|| j < _dom.Gcc._js || j > _dom.Gcc._je
|| k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gcc._is - i) * (i < _dom.Gcc._is);
j += (_dom.Gcc._js - j) * (j < _dom.Gcc._js);
k += (_dom.Gcc._ks - k) * (k < _dom.Gcc._ks);
i += (_dom.Gcc._ie - i) * (i > _dom.Gcc._ie);
j += (_dom.Gcc._je - j) * (j > _dom.Gcc._je);
k += (_dom.Gcc._ke - k) * (k > _dom.Gcc._ke);
// Cell-centered indices
C = GCC_LOC(i, j, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Ce = GCC_LOC(i + 1, j, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cw = GCC_LOC(i - 1, j, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cn = GCC_LOC(i, j + 1, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cs = GCC_LOC(i, j - 1, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Ct = GCC_LOC(i, j, k + 1, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cb = GCC_LOC(i, j, k - 1, _dom.Gcc.s1b, _dom.Gcc.s2b);
// Cartesian location of center of cell
xx = (i - 0.5) * _dom.dx + _dom.xs;
yy = (j - 0.5) * _dom.dy + _dom.ys;
zz = (k - 0.5) * _dom.dz + _dom.zs;
// perform tri-linear interpolation
real dpdx = 0.5*(p[Ce] - p[Cw]) * ddx;
real dpdy = 0.5*(p[Cn] - p[Cs]) * ddy;
real dpdz = 0.5*(p[Ct] - p[Cb]) * ddz;
pp[node + NNODES*part] = p[C] + dpdx*(x - xx) + dpdy*(y - yy) + dpdz*(z - zz);
// set ppwall equal to
/* ppwall = (parts[part].nodes[node] == WEST_WALL_D || parts[part].nodes[node] == WEST_WALL_N)*p[Cw]
+ (parts[part].nodes[node] == EAST_WALL_D || parts[part].nodes[node] == EAST_WALL_N)*p[Ce]
+ (parts[part].nodes[node] == SOUTH_WALL_D || parts[part].nodes[node] == SOUTH_WALL_N)*p[Cs]
+ (parts[part].nodes[node] == NORTH_WALL_D || parts[part].nodes[node] == NORTH_WALL_N)*p[Cn]
+ (parts[part].nodes[node] == BOTTOM_WALL_D || parts[part].nodes[node] == BOTTOM_WALL_N)*p[Cb]
+ (parts[part].nodes[node] == TOP_WALL_D || parts[part].nodes[node] == TOP_WALL_N)*p[Ct];
*/
// switch to particle rest frame
real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp);
ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp);
ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp);
real bousiq_x = -s_beta*(parts[part].s - s_ref)*g.x;
real bousiq_y = -s_beta*(parts[part].s - s_ref)*g.y;
real bousiq_z = -s_beta*(parts[part].s - s_ref)*g.z;
real accdotr = (-gradP.x * irho_f - udot + bousiq_x)*xp +
(-gradP.y * irho_f - vdot + bousiq_y)*yp +
(-gradP.z * irho_f - wdot + bousiq_z)*zp;
pp[node + NNODES*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// ppwall -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// Zero if this node intersects wall or is out of bounds
pp[node + NNODES*part] = pp[node+part*NNODES] * (1 - oob) *
(parts[part].nodes[node] == -1);
// pp[node + NNODES*part] = ppwall * oob * (parts[part].nodes[node] < -1) +
// pp[node + NNODES*part] * (1 - oob) * (parts[part].nodes[node] == -1);
/* Interpolate Velocities */
// don't work with cell-center anymore; find closest cell face in x-direction
/* Interpolate u-velocity */
arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gfx.isb;
arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gfx.jsb;
arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gfx.ksb;
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
round_x = lrint(arg_x);
round_y = lrint(arg_y);
round_z = lrint(arg_z);
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
i -= _dom.Gfx.isb;
j -= _dom.Gfx.jsb;
k -= _dom.Gfx.ksb;
//i = round((x - _dom.xs) * ddx) + DOM_BUF;
//j = floor((y - _dom.ys) * ddy) + DOM_BUF;
//k = floor((z - _dom.zs) * ddz) + DOM_BUF;
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
// Use >= so domain is [start, end)
oobi = i < _dom.Gcc._is || i > _dom.Gcc._ie;
oobj = j < _dom.Gcc._js || j > _dom.Gcc._je;
oobk = k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gfx._is - i) * (i < _dom.Gfx._is);
j += (_dom.Gfx._js - j) * (j < _dom.Gfx._js);
k += (_dom.Gfx._ks - k) * (k < _dom.Gfx._ks);
i += (_dom.Gfx._ie - i) * (i >= _dom.Gfx._ie);
j += (_dom.Gfx._je - j) * (j > _dom.Gfx._je);
k += (_dom.Gfx._ke - k) * (k > _dom.Gfx._ke);
// Face-centered indices
C = GFX_LOC(i, j, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Ce = GFX_LOC(i + 1, j, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cw = GFX_LOC(i - 1, j, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cn = GFX_LOC(i, j + 1, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cs = GFX_LOC(i, j - 1, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Ct = GFX_LOC(i, j, k + 1, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cb = GFX_LOC(i, j, k - 1, _dom.Gfx.s1b, _dom.Gfx.s2b);
// Cartesian location of face
xx = (i - DOM_BUF) * _dom.dx + _dom.xs;
yy = (j - 0.5) * _dom.dy + _dom.ys;
zz = (k - 0.5) * _dom.dz + _dom.zs;
// Tri-linear interpolation
real dudx = 0.5*(u[Ce] - u[Cw]) * ddx;
real dudy = 0.5*(u[Cn] - u[Cs]) * ddy;
real dudz = 0.5*(u[Ct] - u[Cb]) * ddz;
uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz);
// set uuwall equal to interfering wall u-velocity
uuwalli = (parts[part].nodes[node] == WEST_WALL_D)*bc->uWD
+ (parts[part].nodes[node] == EAST_WALL_D)*bc->uED;
uuwallj = (parts[part].nodes[node] == SOUTH_WALL_D)*bc->uSD
+ (parts[part].nodes[node] == NORTH_WALL_D)*bc->uND;
uuwallk = (parts[part].nodes[node] == BOTTOM_WALL_D)*bc->uBD
+ (parts[part].nodes[node] == TOP_WALL_D)*bc->uTD;
// switch to particle rest frame
real ocrossr_x = oy*zp - oz*yp;
real odotcrossr_x = oydot*zp - ozdot*yp;
real tmp_u = parts[part].u + ocrossr_x +
0.1 * inu * (rs5 - a5) * irs3 * odotcrossr_x;
uu -= tmp_u;
uuwalli -= tmp_u;
uuwallj -= tmp_u;
uuwallk -= tmp_u;
// set actual node value based on whether it is interfered with
uu = (1-oobi) * (1-oobj) * (1-oobk) * (parts[part].nodes[node] == -1) * uu
+ oobi * (1-oobj) * (1-oobk) * (parts[part].nodes[node] < -1) * uuwalli
+ (1-oobi) * oobj * (1-oobk) * (parts[part].nodes[node] < -1) * uuwallj
+ (1-oobi) * (1-oobj) * oobk * (parts[part].nodes[node] < -1) * uuwallk;
/* interpolate v-velocity */
//i = floor((x - _dom.xs) * ddx) + DOM_BUF;
//j = round((y - _dom.ys) * ddy) + DOM_BUF;
//k = floor((z - _dom.zs) * ddz) + DOM_BUF;
arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gfy.isb;
arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gfy.jsb;
arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gfy.ksb;
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
round_x = lrint(arg_x);
round_y = lrint(arg_y);
round_z = lrint(arg_z);
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
i -= _dom.Gfy.isb;
j -= _dom.Gfy.jsb;
k -= _dom.Gfy.ksb;
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
oobi = i < _dom.Gcc._is || i > _dom.Gcc._ie;
oobj = j < _dom.Gcc._js || j > _dom.Gcc._je;
oobk = k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gfy._is - i) * (i < _dom.Gfy._is);
j += (_dom.Gfy._js - j) * (j < _dom.Gfy._js);
k += (_dom.Gfy._ks - k) * (k < _dom.Gfy._ks);
i += (_dom.Gfy._ie - i) * (i > _dom.Gfy._ie);
j += (_dom.Gfy._je - j) * (j >= _dom.Gfy._je);
k += (_dom.Gfy._ke - k) * (k > _dom.Gfy._ke);
// Face-centered indices
C = GFY_LOC(i, j, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Ce = GFY_LOC(i + 1, j, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cw = GFY_LOC(i - 1, j, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cn = GFY_LOC(i, j + 1, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cs = GFY_LOC(i, j - 1, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Ct = GFY_LOC(i, j, k + 1, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cb = GFY_LOC(i, j, k - 1, _dom.Gfy.s1b, _dom.Gfy.s2b);
// Cartesian location of face
xx = (i-0.5) * _dom.dx + _dom.xs;
yy = (j-DOM_BUF) * _dom.dy + _dom.ys;
zz = (k-0.5) * _dom.dz + _dom.zs;
// Tri-linear interpolation
real dvdx = 0.5*(v[Ce] - v[Cw]) * ddx;
real dvdy = 0.5*(v[Cn] - v[Cs]) * ddy;
real dvdz = 0.5*(v[Ct] - v[Cb]) * ddz;
vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz);
// set vvwall equal to interfering wall v-velocity
vvwalli = (parts[part].nodes[node] == WEST_WALL_D)*bc->vWD
+ (parts[part].nodes[node] == EAST_WALL_D)*bc->vED;
vvwallj = (parts[part].nodes[node] == SOUTH_WALL_D)*bc->vSD
+ (parts[part].nodes[node] == NORTH_WALL_D)*bc->vND;
vvwallk = (parts[part].nodes[node] == BOTTOM_WALL_D)*bc->vBD
+ (parts[part].nodes[node] == TOP_WALL_D)*bc->vTD;
// switch to particle rest frame
real ocrossr_y = -(ox*zp - oz*xp);
real odotcrossr_y = -(oxdot*zp - ozdot*xp);
real tmp_v = parts[part].v + ocrossr_y +
0.1 * inu * (rs5 - a5) * irs3 * odotcrossr_y;
vv -= tmp_v;
vvwalli -= tmp_v;
vvwallj -= tmp_v;
vvwallk -= tmp_v;
// set actual node value based on whether it is interfered with
vv = (1-oobi) * (1-oobj) * (1-oobk) * (parts[part].nodes[node] == -1) * vv
+ oobi * (1-oobj) * (1-oobk) * (parts[part].nodes[node] < -1) * vvwalli
+ (1-oobi) * oobj * (1-oobk) * (parts[part].nodes[node] < -1) * vvwallj
+ (1-oobi) * (1-oobj) * oobk * (parts[part].nodes[node] < -1) * vvwallk;
/* interpolate w-velocity */
arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gfz.isb;
arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gfz.jsb;
arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gfz.ksb;
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
round_x = lrint(arg_x);
round_y = lrint(arg_y);
round_z = lrint(arg_z);
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
i -= _dom.Gfz.isb;
j -= _dom.Gfz.jsb;
k -= _dom.Gfz.ksb;
//i = floor((x - _dom.xs) * ddx) + DOM_BUF;
//j = floor((y - _dom.ys) * ddy) + DOM_BUF;
//k = round((z - _dom.zs) * ddz) + DOM_BUF;
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
oobi = i < _dom.Gcc._is || i > _dom.Gcc._ie;
oobj = j < _dom.Gcc._js || j > _dom.Gcc._je;
oobk = k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gfz._is - i) * (i < _dom.Gfz._is);
j += (_dom.Gfz._js - j) * (j < _dom.Gfz._js);
k += (_dom.Gfz._ks - k) * (k < _dom.Gfz._ks);
i += (_dom.Gfz._ie - i) * (i > _dom.Gfz._ie);
j += (_dom.Gfz._je - j) * (j > _dom.Gfz._je);
k += (_dom.Gfz._ke - k) * (k >= _dom.Gfz._ke);
// Face-centered indices
C = GFZ_LOC(i, j, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Ce = GFZ_LOC(i + 1, j, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cw = GFZ_LOC(i - 1, j, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cn = GFZ_LOC(i, j + 1, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cs = GFZ_LOC(i, j - 1, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Ct = GFZ_LOC(i, j, k + 1, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cb = GFZ_LOC(i, j, k - 1, _dom.Gfz.s1b, _dom.Gfz.s2b);
// Cartesian location of face
xx = (i-0.5) * _dom.dx + _dom.xs;
yy = (j-0.5) * _dom.dy + _dom.ys;
zz = (k-DOM_BUF) * _dom.dz + _dom.zs;
// Tri-linear interpolation
real dwdx = 0.5*(w[Ce] - w[Cw]) * ddx;
real dwdy = 0.5*(w[Cn] - w[Cs]) * ddy;
real dwdz = 0.5*(w[Ct] - w[Cb]) * ddz;
ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz);
  // set wwwall equal to interfering wall w-velocity
wwwalli = (parts[part].nodes[node] == WEST_WALL_D)*bc->wWD
+ (parts[part].nodes[node] == EAST_WALL_D)*bc->wED;
wwwallj = (parts[part].nodes[node] == SOUTH_WALL_D)*bc->wSD
+ (parts[part].nodes[node] == NORTH_WALL_D)*bc->wND;
wwwallk = (parts[part].nodes[node] == BOTTOM_WALL_D)*bc->wBD
+ (parts[part].nodes[node] == TOP_WALL_D)*bc->wTD;
// switch to particle rest frame
real ocrossr_z = ox*yp - oy*xp;
real odotcrossr_z = oxdot*yp - oydot*xp;
real tmp_w = parts[part].w + ocrossr_z +
0.1 * inu * (rs5 - a5) * irs3 * odotcrossr_z;
ww -= tmp_w;
wwwalli -= tmp_w;
wwwallj -= tmp_w;
wwwallk -= tmp_w;
// set actual node value based on whether it is interfered with
ww = (1-oobi) * (1-oobj) * (1-oobk) * (parts[part].nodes[node] == -1) * ww
+ oobi * (1-oobj) * (1-oobk) * (parts[part].nodes[node] < -1) * wwwalli
+ (1-oobi) * oobj * (1-oobk) * (parts[part].nodes[node] < -1) * wwwallj
+ (1-oobi) * (1-oobj) * oobk * (parts[part].nodes[node] < -1) * wwwallk;
// convert (uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays
cart2sphere(uu, vv, ww, _node_t[node], _node_p[node],
&ur[node+part*NNODES], &ut[node+part*NNODES], &up[node+part*NNODES]);
}
__global__ void lebedev_quadrature(part_struct *parts, int ncoeffs_max,
real *pp, real *ur, real *ut, real *up,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
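  // 26-point Lebedev quadrature over the particle surface: nodes 0-5 carry weight _A1,
  // nodes 6-17 weight _A2, and nodes 18-25 weight _A3 (see the partial-sum loops below).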
int node = threadIdx.x;
int part = blockIdx.x;
int coeff = blockIdx.y;
if (coeff < parts[part].ncoeff) {
/* Calculate integrand at each node */
int j = part*NNODES*ncoeffs_max + coeff*NNODES + node;
int n = _nn[coeff];
int m = _mm[coeff];
real theta = _node_t[node];
real phi = _node_p[node];
real N_nm = nnm(n, m);
real P_nm = pnm(n, m, theta);
real P_n1m = pnm(n + 1., m, theta);
real dPdt = (n - m + 1.)*P_n1m - (n + 1.)*cos(theta)*P_nm;
real dPdp = m*P_nm;
// Precalculate things we use more than once
real isth = 1./sin(theta);
real cmphi = cos(m * phi);
real smphi = sin(m * phi);
int stride = node + part*NNODES;
int_Yp_re[j] = N_nm*P_nm*pp[stride]*cmphi;
int_Yp_im[j] = -N_nm*P_nm*pp[stride]*smphi;
int_rDYu_re[j] = N_nm*isth*(dPdt * ut[stride] * cmphi
- dPdp * up[stride] * smphi);
int_rDYu_im[j] = N_nm*isth*(-dPdt * ut[stride] * smphi
- dPdp * up[stride] * cmphi);
int_xXDYu_re[j] = N_nm*isth*(dPdp * ut[stride] * smphi
+ dPdt * up[stride] * cmphi);
int_xXDYu_im[j] = N_nm*isth*(dPdp * ut[stride] * cmphi
- dPdt * up[stride] * smphi);
__syncthreads();
/* Compute partial sum of Lebedev quadrature (scalar product) */
// put sum into first node position for each coeff for each particle
if (node == 0) {
int_Yp_re[j] *= _A1;
int_Yp_im[j] *= _A1;
int_rDYu_re[j] *= _A1;
int_rDYu_im[j] *= _A1;
int_xXDYu_re[j] *= _A1;
int_xXDYu_im[j] *= _A1;
for (int i = 1; i < 6; i++) {
int_Yp_re[j] += _A1 * int_Yp_re[j+i];
int_Yp_im[j] += _A1 * int_Yp_im[j+i];
int_rDYu_re[j] += _A1 * int_rDYu_re[j+i];
int_rDYu_im[j] += _A1 * int_rDYu_im[j+i];
int_xXDYu_re[j] += _A1 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += _A1 * int_xXDYu_im[j+i];
}
for (int i = 6; i < 18; i++) {
int_Yp_re[j] += _A2 * int_Yp_re[j+i];
int_Yp_im[j] += _A2 * int_Yp_im[j+i];
int_rDYu_re[j] += _A2 * int_rDYu_re[j+i];
int_rDYu_im[j] += _A2 * int_rDYu_im[j+i];
int_xXDYu_re[j] += _A2 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += _A2 * int_xXDYu_im[j+i];
}
for (int i = 18; i < 26; i++) {
int_Yp_re[j] += _A3 * int_Yp_re[j+i];
int_Yp_im[j] += _A3 * int_Yp_im[j+i];
int_rDYu_re[j] += _A3 * int_rDYu_re[j+i];
int_rDYu_im[j] += _A3 * int_rDYu_im[j+i];
int_xXDYu_re[j] += _A3 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += _A3 * int_xXDYu_im[j+i];
}
} // if (node == 0)
}
}
__global__ void compute_lambs_coeffs(part_struct *parts, real relax,
real mu, real nu, int ncoeffs_max, int nparts,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int coeff = threadIdx.x;
int part = blockIdx.x;
// precalculate constants
real ars = parts[part].r / parts[part].rs;
real rsa = parts[part].rs / parts[part].r;
real r2 = parts[part].r * parts[part].r;
real inu = 1./nu;
real imunu = inu / mu;
if (coeff < parts[part].ncoeff && part < nparts) {
int j = part * NNODES * ncoeffs_max + coeff * NNODES + 0;
int n = _nn[coeff];
if (n == 0) {
parts[part].pnm_re[coeff] = (1. - relax) * parts[part].pnm_re0[coeff] +
relax * r2 * imunu * int_Yp_re[j];
parts[part].pnm_im[coeff] = (1. - relax) * parts[part].pnm_im0[coeff] +
relax * r2 * imunu * int_Yp_im[j];
parts[part].phinm_re[coeff] = 0.;
parts[part].phinm_im[coeff] = 0.;
parts[part].chinm_re[coeff] = 0.;
parts[part].chinm_im[coeff] = 0.;
} else { // n != 0
// Precalculate
real pow_ars_np1 = pow(ars, n + 1.); // ars^(n+1)
real pow_ars_2np1 = pow_ars_np1 * pow_ars_np1 * rsa; // ars^(2n+1)
real pow_rsa_nm1 = pow(rsa, n - 1.); // rsa^(n-1)
real pow_rsa_n = pow_rsa_nm1 * rsa; // rsa^n
real pow_rsa_np1 = pow_rsa_n * rsa; // rsa^(n+1)
real i_np1 = 1./(n + 1.);
real i_2np3 = 1./(2.*n + 3.);
// calculate p_nm and phi_nm
real A = (1. - 0.5*n*(2.*n - 1.) * i_np1 * pow_ars_2np1) * pow_rsa_n;
real B = n*(2.*n - 1.)*(2.*n + 1.) * i_np1*pow_ars_np1;
real C = 0.25*n*(2.*(n + 3.)*i_2np3
+ (n - 2. - n*(2.*n + 1.)*i_2np3*ars*ars)*pow_ars_2np1)*pow_rsa_np1;
real D = n*(n + 1. + 0.5*((n - 2.)*(2.*n + 1.)*rsa*rsa
- n*(2.*n - 1.))*pow_ars_2np1)*pow_rsa_nm1;
real idet = 1./ (A*D + B*C);
parts[part].pnm_re[coeff] = (r2*imunu*int_Yp_re[j]*D +
parts[part].r*inu*int_rDYu_re[j]*B) * idet;
parts[part].pnm_im[coeff] = (r2*imunu*int_Yp_im[j]*D +
parts[part].r*inu*int_rDYu_im[j]*B) * idet;
parts[part].phinm_re[coeff] = (parts[part].r*inu*int_rDYu_re[j]*A -
r2*imunu*int_Yp_re[j]*C) * idet;
parts[part].phinm_im[coeff] = (parts[part].r*inu*int_rDYu_im[j]*A -
r2*imunu*int_Yp_im[j]*C) * idet;
// calculate chi_nm
real E = n*(n + 1.)*(pow_ars_2np1 - 1.)*pow_rsa_n;
real iE = 1./ E;
parts[part].chinm_re[coeff] = parts[part].r*inu*int_xXDYu_re[j] * iE;
parts[part].chinm_im[coeff] = parts[part].r*inu*int_xXDYu_im[j] * iE;
// apply underrelaxation
parts[part].pnm_re[coeff] = parts[part].pnm_re0[coeff]*(1. - relax)
+ relax*parts[part].pnm_re[coeff];
parts[part].pnm_im[coeff] = parts[part].pnm_im0[coeff]*(1. - relax)
+ relax*parts[part].pnm_im[coeff];
parts[part].phinm_re[coeff] = parts[part].phinm_re0[coeff]*(1. - relax)
+ relax*parts[part].phinm_re[coeff];
parts[part].phinm_im[coeff] = parts[part].phinm_im0[coeff]*(1. - relax)
+ relax*parts[part].phinm_im[coeff];
parts[part].chinm_re[coeff] = parts[part].chinm_re0[coeff]*(1. - relax)
+ relax*parts[part].chinm_re[coeff];
parts[part].chinm_im[coeff] = parts[part].chinm_im0[coeff]*(1. - relax)
+ relax*parts[part].chinm_im[coeff];
}
}
}
__global__ void calc_forces(part_struct *parts, int nparts,
real gradPx, real gradPy, real gradPz, real rho_f, real mu, real nu,
real s_beta, real s_ref, g_struct g)
{
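  // Assemble the hydrodynamic force and torque on each particle from the low-order Lamb
  // coefficients (pnm, phinm, chinm) together with the particle acceleration, imposed pressure
  // gradient, and Boussinesq buoyancy contributions.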
int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number
real irho_f = 1./ rho_f;
if(pp < nparts) {
real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r;
real N10 = sqrt(3./4./PI);
real N11 = sqrt(3./8./PI);
real bousiq_x = -s_beta*(parts[pp].s - s_ref)*g.x;
real bousiq_y = -s_beta*(parts[pp].s - s_ref)*g.y;
real bousiq_z = -s_beta*(parts[pp].s - s_ref)*g.z;
parts[pp].Fx = rho_f * vol * (parts[pp].udot + gradPx * irho_f - bousiq_x)
- PI * mu * nu * 2.*N11 * (parts[pp].pnm_re[2]
+ 6.*parts[pp].phinm_re[2]);
parts[pp].Fy = rho_f * vol * (parts[pp].vdot + gradPy * irho_f - bousiq_y)
+ PI * mu * nu * 2.*N11 * (parts[pp].pnm_im[2]
+ 6.*parts[pp].phinm_im[2]);
parts[pp].Fz = rho_f * vol * (parts[pp].wdot + gradPz * irho_f - bousiq_z)
+ PI * mu * nu * N10 * (parts[pp].pnm_re[1]
+ 6.*parts[pp].phinm_re[1]);
parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot
- 8. * PI * mu * nu * 2.*N11 * parts[pp].r * parts[pp].chinm_re[2];
parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot
+ 8. * PI * mu * nu * 2.*N11 * parts[pp].r * parts[pp].chinm_im[2];
parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot
+ 8. * PI * mu * nu * N10 * parts[pp].r * parts[pp].chinm_re[1];
}
}
__global__ void pack_sums_e(real *sum_send_e, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._ie; ti <= _bins.Gcc._ieb; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._ie) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_e[sp0] = int_Yp_re[psum_ind];
sum_send_e[sp1] = int_Yp_im[psum_ind];
sum_send_e[sp2] = int_rDYu_re[psum_ind];
sum_send_e[sp3] = int_rDYu_im[psum_ind];
sum_send_e[sp4] = int_xXDYu_re[psum_ind];
sum_send_e[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over ti planes
}
}
__global__ void pack_sums_w(real *sum_send_w, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._isb; ti <= _bins.Gcc._is; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._isb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_w[sp0] = int_Yp_re[psum_ind];
sum_send_w[sp1] = int_Yp_im[psum_ind];
sum_send_w[sp2] = int_rDYu_re[psum_ind];
sum_send_w[sp3] = int_rDYu_im[psum_ind];
sum_send_w[sp4] = int_xXDYu_re[psum_ind];
sum_send_w[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over ti
}
}
__global__ void pack_sums_n(real *sum_send_n, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._je; tj <= _bins.Gcc._jeb; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._je) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_n[sp0] = int_Yp_re[psum_ind];
//printf("N%d >> packing int_Yp_re[part %d, coeff %d (%d)] = %lf to sum_send_n[%d]\n",
// _dom.rank, pp, coeff, psum_ind, int_Yp_re[psum_ind], sp0);
sum_send_n[sp1] = int_Yp_im[psum_ind];
sum_send_n[sp2] = int_rDYu_re[psum_ind];
sum_send_n[sp3] = int_rDYu_im[psum_ind];
sum_send_n[sp4] = int_xXDYu_re[psum_ind];
sum_send_n[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tj planes
}
}
__global__ void pack_sums_s(real *sum_send_s, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._jsb; tj <= _bins.Gcc._js; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._jsb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_s[sp0] = int_Yp_re[psum_ind];
sum_send_s[sp1] = int_Yp_im[psum_ind];
sum_send_s[sp2] = int_rDYu_re[psum_ind];
sum_send_s[sp3] = int_rDYu_im[psum_ind];
sum_send_s[sp4] = int_xXDYu_re[psum_ind];
sum_send_s[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tj planes
}
}
__global__ void pack_sums_t(real *sum_send_t, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ke; tk <= _bins.Gcc._keb; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ke) * s2b;
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_t[sp0] = int_Yp_re[psum_ind];
sum_send_t[sp1] = int_Yp_im[psum_ind];
sum_send_t[sp2] = int_rDYu_re[psum_ind];
sum_send_t[sp3] = int_rDYu_im[psum_ind];
sum_send_t[sp4] = int_xXDYu_re[psum_ind];
sum_send_t[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tk planes
}
}
__global__ void pack_sums_b(real *sum_send_b, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ksb; tk <= _bins.Gcc._ks; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ksb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_b[sp0] = int_Yp_re[psum_ind];
sum_send_b[sp1] = int_Yp_im[psum_ind];
sum_send_b[sp2] = int_rDYu_re[psum_ind];
sum_send_b[sp3] = int_rDYu_im[psum_ind];
sum_send_b[sp4] = int_xXDYu_re[psum_ind];
sum_send_b[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tk planes
}
}
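/* Layout note for the pack_sums_* kernels: in the packed send buffers the
 * particle slot (dest) varies slowest, the scalar product (one of the NSP
 * SP_* ids) varies in the middle and the coefficient varies fastest,
 *   sp = coeff + ncoeffs_max*SP_ID + ncoeffs_max*NSP*dest.
 * As a purely illustrative example (values assumed, not taken from the
 * headers): with ncoeffs_max = 4, NSP = 6 and an SP_* id of 3, the entry for
 * dest = 2, coeff = 1 lands at 1 + 4*3 + 4*6*2 = 61. The values copied are the
 * node-0 partial sums, psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES, i.e.
 * the per-coefficient Lebedev quadrature results. */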
__global__ void unpack_sums_e(real *sum_recv_e, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._ie; ti <= _bins.Gcc._ieb; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._ie) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_e[sp0];
int_Yp_im[psum_ind] += sum_recv_e[sp1];
int_rDYu_re[psum_ind] += sum_recv_e[sp2];
int_rDYu_im[psum_ind] += sum_recv_e[sp3];
int_xXDYu_re[psum_ind] += sum_recv_e[sp4];
int_xXDYu_im[psum_ind] += sum_recv_e[sp5];
}
}
} // loop over ti
}
}
__global__ void unpack_sums_w(real *sum_recv_w, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._isb; ti <= _bins.Gcc._is; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._isb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_w[sp0];
int_Yp_im[psum_ind] += sum_recv_w[sp1];
int_rDYu_re[psum_ind] += sum_recv_w[sp2];
int_rDYu_im[psum_ind] += sum_recv_w[sp3];
int_xXDYu_re[psum_ind] += sum_recv_w[sp4];
int_xXDYu_im[psum_ind] += sum_recv_w[sp5];
}
}
} // loop over ti
}
}
__global__ void unpack_sums_n(real *sum_recv_n, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._je; tj <= _bins.Gcc._jeb; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._je) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_n[sp0];
int_Yp_im[psum_ind] += sum_recv_n[sp1];
int_rDYu_re[psum_ind] += sum_recv_n[sp2];
int_rDYu_im[psum_ind] += sum_recv_n[sp3];
int_xXDYu_re[psum_ind] += sum_recv_n[sp4];
int_xXDYu_im[psum_ind] += sum_recv_n[sp5];
}
}
} // loop over tj
}
}
__global__ void unpack_sums_s(real *sum_recv_s, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._jsb; tj <= _bins.Gcc._js; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._jsb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_s[sp0];
//printf("N%d >> unpacking int_Yp_re[part %d, coeff %d (%d)] = %lf from sum_send_s[%d]\n",
// _dom.rank, pp, coeff, psum_ind, int_Yp_re[psum_ind], sp0);
int_Yp_im[psum_ind] += sum_recv_s[sp1];
int_rDYu_re[psum_ind] += sum_recv_s[sp2];
int_rDYu_im[psum_ind] += sum_recv_s[sp3];
int_xXDYu_re[psum_ind] += sum_recv_s[sp4];
int_xXDYu_im[psum_ind] += sum_recv_s[sp5];
}
}
} // loop over tj
}
}
__global__ void unpack_sums_t(real *sum_recv_t, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ke; tk <= _bins.Gcc._keb; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ke) * s2b;
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_t[sp0];
int_Yp_im[psum_ind] += sum_recv_t[sp1];
int_rDYu_re[psum_ind] += sum_recv_t[sp2];
int_rDYu_im[psum_ind] += sum_recv_t[sp3];
int_xXDYu_re[psum_ind] += sum_recv_t[sp4];
int_xXDYu_im[psum_ind] += sum_recv_t[sp5];
}
}
} // loop over tk
}
}
__global__ void unpack_sums_b(real *sum_recv_b, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ksb; tk <= _bins.Gcc._ks; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ksb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_b[sp0];
int_Yp_im[psum_ind] += sum_recv_b[sp1];
int_rDYu_re[psum_ind] += sum_recv_b[sp2];
int_rDYu_im[psum_ind] += sum_recv_b[sp3];
int_xXDYu_re[psum_ind] += sum_recv_b[sp4];
int_xXDYu_im[psum_ind] += sum_recv_b[sp5];
}
}
} // loop over tk
}
}
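/* Note on the unpack_sums_* kernels: the pack kernels copy (=) the node-0
 * partial sums into the send buffers, while the unpack kernels accumulate (+=)
 * the received values back into the same node-0 slots, so each subdomain's
 * partial sums are completed with the contributions computed on its
 * neighbors. */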
__global__ void compute_error(real lamb_cut, int ncoeffs_max, int nparts,
part_struct *parts, real *part_errors, int *part_nums)
{
int part = blockIdx.x;
int coeff = threadIdx.x;
real div = 0.;
real max = DBL_MIN;
__shared__ real s_coeffs[MAX_COEFFS * NSP];
__shared__ real s_coeffs0[MAX_COEFFS * NSP];
__shared__ real s_max[MAX_COEFFS];
if (part < nparts && coeff < ncoeffs_max) {
s_coeffs[coeff + ncoeffs_max * 0] = parts[part].pnm_re[coeff];
s_coeffs[coeff + ncoeffs_max * 1] = parts[part].pnm_im[coeff];
s_coeffs[coeff + ncoeffs_max * 2] = parts[part].phinm_re[coeff];
s_coeffs[coeff + ncoeffs_max * 3] = parts[part].phinm_im[coeff];
s_coeffs[coeff + ncoeffs_max * 4] = parts[part].chinm_re[coeff];
s_coeffs[coeff + ncoeffs_max * 5] = parts[part].chinm_im[coeff];
s_coeffs0[coeff + ncoeffs_max * 0] = parts[part].pnm_re0[coeff];
s_coeffs0[coeff + ncoeffs_max * 1] = parts[part].pnm_im0[coeff];
s_coeffs0[coeff + ncoeffs_max * 2] = parts[part].phinm_re0[coeff];
s_coeffs0[coeff + ncoeffs_max * 3] = parts[part].phinm_im0[coeff];
s_coeffs0[coeff + ncoeffs_max * 4] = parts[part].chinm_re0[coeff];
s_coeffs0[coeff + ncoeffs_max * 5] = parts[part].chinm_im0[coeff];
s_max[coeff] = DBL_MIN;
__syncthreads();
    // If the coefficient has a large enough magnitude (relative to the
    // 0th-order coefficient), calculate the error
for (int i = 0; i < NSP; i++) {
int c = coeff + ncoeffs_max * i;
      // Determine whether the current coefficient is large enough compared to
      // the 0th-order coefficient, and large enough in absolute terms to avoid
      // trouble with close-to-zero errors; if the 0th-order coefficient is
      // itself 0, ignore it
real curr_val = s_coeffs[c];
real zeroth_val = s_coeffs[0 + ncoeffs_max * i];
int flag = (fabs(curr_val) > fabs(lamb_cut*zeroth_val)) *
(fabs(curr_val) > 1.e-16) *
(fabs(zeroth_val) > DBL_MIN);
      // If flag == 1, set s_coeffs[c] equal to the error value
      // If flag == 0, set s_coeffs[c] equal to zero (no error)
div = fabs(curr_val);
div += (1.e-16 - div) * (div < 1.e-16);
real curr_val0 = s_coeffs0[c];
s_coeffs[c] = (real) flag * fabs(curr_val - curr_val0) / div;
      // Keep the running maximum error over the six coefficient sets
      // (p, phi, chi; re and im) for this coefficient index
s_max[coeff] += (s_coeffs[c] - s_max[coeff]) * (s_coeffs[c] > s_max[coeff]);
}
__syncthreads();
    // We've now calculated the error for each "large enough" coefficient and,
    // for each coefficient index, found the maximum over the six coefficient
    // sets. Now find the maximum over all coefficient indices
if (coeff == 0) {
for (int i = 0; i < ncoeffs_max; i++) {
max += (s_max[i] - max) * (s_max[i] > max);
}
part_errors[part] = max;
part_nums[part] = parts[part].N;
}
}
}
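/* The branchless updates in compute_error are running maxima. For example,
 *   s_max[coeff] += (s_coeffs[c] - s_max[coeff]) * (s_coeffs[c] > s_max[coeff]);
 * leaves s_max[coeff] unchanged when s_coeffs[c] <= s_max[coeff] and sets it
 * to s_coeffs[c] otherwise: with s_max[coeff] = 0.2 and s_coeffs[c] = 0.5 the
 * update adds (0.5 - 0.2)*1 = 0.3, giving 0.5. */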
__global__ void store_coeffs(part_struct *parts, int nparts,
int ncoeffs_max)
{
int part = blockIdx.x;
int coeff = threadIdx.x;
if (part < nparts && coeff < ncoeffs_max) {
parts[part].pnm_re0[coeff] = parts[part].pnm_re[coeff];
parts[part].pnm_im0[coeff] = parts[part].pnm_im[coeff];
parts[part].phinm_re0[coeff] = parts[part].phinm_re[coeff];
parts[part].phinm_im0[coeff] = parts[part].phinm_im[coeff];
parts[part].chinm_re0[coeff] = parts[part].chinm_re[coeff];
parts[part].chinm_im0[coeff] = parts[part].chinm_im[coeff];
}
}
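/* store_coeffs snapshots the current coefficients into the *_0 arrays; those
 * are the values that compute_lambs_coeffs blends against under relaxation and
 * that compute_error uses as its reference. */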
| f5ff927e3b4862d63975de02aa45c8a114ec5319.cu | /*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_physalis.h"
__device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z)
{
*x = r * sin(theta) * cos(phi);
*y = r * sin(theta) * sin(phi);
*z = r * cos(theta);
}
__device__ void cart2sphere(real u, real v, real w, real theta, real phi,
real *ur, real *ut, real *up)
{
real st = sin(theta);
real ct = cos(theta);
real sp = sin(phi);
real cp = cos(phi);
*ur = st * (u * cp + v * sp) + w * ct;
*ut = ct * (u * cp + v * sp) - w * st;
*up = -u * sp + v * cp;
}
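/* Worked check of the two transforms above: at theta = PI/2, phi = 0,
 * rtp2xyz(r, theta, phi, ...) returns (x, y, z) = (r, 0, 0), and
 * cart2sphere(u, v, w, theta, phi, ...) returns ur = u, ut = -w, up = v,
 * i.e. on the +x axis r-hat = +x-hat, theta-hat = -z-hat, phi-hat = +y-hat. */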
__device__ real nnm(int n, int m)
{
real fact_top = 1;
real fact_bot = 1;
for (int i = 1; i <= (n - m); i++) fact_top *= (real) i;
for (int i = 1; i <= (n + m); i++) fact_bot *= (real) i;
return sqrt((2.*n + 1.) / 4. / PI * fact_top / fact_bot);
}
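/* Example: nnm(1, 0) = sqrt(3/(4*PI)) and nnm(1, 1) = sqrt(3/(4*PI) * 1/2)
 * = sqrt(3/(8*PI)), matching the N10 and N11 constants hard-coded in
 * calc_forces below. */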
__device__ real pnm(int n, int m, real theta)
{
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
//case -1: return -0.5*y;
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
//case -2: return 0.125*y*y;
//case -1: return -0.5*x*y;
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
//case -3: return -0.02083333333333*y*y*y;
//case -2: return 0.125*x*y*y;
//case -1: return -0.125*(1. - 5.*x*x)*y;
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
//case -4: return .002604166666667*y*y*y*y;
//case -3: return -0.02083333333333*x*y*y*y*y;
//case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
//case -1: return -0.125*x*(3. - 7.*x*x)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
//case -5: return -0.000260416666667*y*y*y*y*y;
//case -4: return 0.002604166666667*x*y*y*y*y;
//case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.);
//case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
//case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
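/* pnm returns the associated Legendre polynomials P_n^m(cos(theta)), including
 * the Condon-Shortley phase, hard-coded up to n = 5; the negative-m branches
 * are commented out and appear to be unused here. */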
__global__ void check_nodes(int nparts, part_struct *parts, BC *bc,
dom_struct *DOM)
{
int node = threadIdx.x;
int part = blockIdx.x;
/* Convert node (r, theta, phi) to (x, y, z) */
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, _node_t[node], _node_p[node], &xp, &yp, &zp);
/* shift from particle center */
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
// start off with all -1's
parts[part].nodes[node] = -1;
/* check if the node is interfered with by a wall */
// compute distance between node and walls
// set equal to some number to identify which wall is interfering
// We use <= for E,N,T and > for W,S,B -- allows us to do [start,end) on all
// subdomains regardless of bc
parts[part].nodes[node] += (WEST_WALL_D + 1) * // set equal to WEST_WALL_D...
(x - _dom.xs < 0) * // if outside domain &
(_dom.I == DOM->Is) * // if edge domain & DIRICHLET
(bc->uW == DIRICHLET || bc->vW == DIRICHLET || bc->wW == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (WEST_WALL_N + 1) *
(x - _dom.xs < 0) *
(_dom.I == DOM->Is) *
(bc->uW == NEUMANN || bc->vW == NEUMANN || bc->wW == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (EAST_WALL_D + 1) *
(x - _dom.xe >= 0) *
(_dom.I == DOM->Ie) *
(bc->uE == DIRICHLET || bc->vE == DIRICHLET || bc->wE == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (EAST_WALL_N + 1) *
(x - _dom.xe >= 0) *
(_dom.I == DOM->Ie) *
(bc->uE == NEUMANN || bc->vE == NEUMANN || bc->wE == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (SOUTH_WALL_D + 1) *
(y - _dom.ys < 0) *
(_dom.J == DOM->Js) *
(bc->uS == DIRICHLET || bc->vS == DIRICHLET || bc->wS == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (SOUTH_WALL_N + 1) *
(y - _dom.ys < 0) *
(_dom.J == DOM->Js) *
(bc->uS == NEUMANN || bc->vS == NEUMANN || bc->wS == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (NORTH_WALL_D + 1) *
(y - _dom.ye >= 0) *
(_dom.J == DOM->Je) *
(bc->uN == DIRICHLET || bc->vN == DIRICHLET || bc->wN == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (NORTH_WALL_N + 1) *
(y - _dom.ye >= 0) *
(_dom.J == DOM->Je) *
(bc->uN == NEUMANN || bc->vN == NEUMANN || bc->wN == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (BOTTOM_WALL_D + 1) *
(z - _dom.zs < 0) *
(_dom.K == DOM->Ks) *
(bc->uB == DIRICHLET || bc->vB == DIRICHLET || bc->wB == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (BOTTOM_WALL_N + 1) *
(z - _dom.zs < 0) *
(_dom.K == DOM->Ks) *
(bc->uB == NEUMANN || bc->vB == NEUMANN || bc->wB == NEUMANN)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (TOP_WALL_D + 1) *
(z - _dom.ze >= 0) *
(_dom.K == DOM->Ke) *
(bc->uT == DIRICHLET || bc->vT == DIRICHLET || bc->wT == DIRICHLET)*
(parts[part].nodes[node] == -1);
parts[part].nodes[node] += (TOP_WALL_N + 1) *
(z - _dom.ze >= 0) *
(_dom.K == DOM->Ke) *
(bc->uT == NEUMANN || bc->vT == NEUMANN || bc->wT == NEUMANN)*
(parts[part].nodes[node] == -1);
}
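/* After check_nodes, parts[part].nodes[node] is -1 for an uncontaminated node
 * and otherwise holds the *_WALL_D / *_WALL_N identifier of the interfering
 * wall; the trailing (parts[part].nodes[node] == -1) factor in each term makes
 * sure only the first matching wall is recorded. */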
__global__ void interpolate_nodes(real *p, real *u, real *v, real *w,
real rho_f, real nu, gradP_struct gradP, part_struct *parts, real *pp,
real *ur, real *ut, real *up, BC *bc, real s_beta, real s_ref, g_struct g)
{
int node = threadIdx.x;
int part = blockIdx.x;
real ddx = 1. / _dom.dx;
real ddy = 1. / _dom.dy;
real ddz = 1. / _dom.dz;
real irho_f = 1. / rho_f;
real inu = 1. / nu;
real ox = parts[part].ox;
real oy = parts[part].oy;
real oz = parts[part].oz;
real oxdot = parts[part].oxdot;
real oydot = parts[part].oydot;
real ozdot = parts[part].ozdot;
real udot = parts[part].udot;
real vdot = parts[part].vdot;
real wdot = parts[part].wdot;
real rs2 = parts[part].rs * parts[part].rs;
real rs3 = rs2 * parts[part].rs;
real rs5 = rs3 * rs2;
real irs3 = 1./rs3;
real a5 = parts[part].r * parts[part].r; // r^2
a5 *= a5 * parts[part].r; // r^5
real uu, vv, ww; // temporary nodes for Cartesian result of interpolation
real uuwalli, uuwallj, uuwallk;
real vvwalli, vvwallj, vvwallk;
real wwwalli, wwwallj, wwwallk;
int i, j, k; // index of cells containing node
int oobi, oobj, oobk, oob; // out of bounds indicator, 1 if out of bounds else 0
int C, Ce, Cw, Cn, Cs, Ct, Cb; // cell indices
real xx, yy, zz; // Cartesian location of p,u,v,w
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, _node_t[node], _node_p[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
/* Find index of cell containing node. */
// Do this in GLOBAL coordinates so that magnitude of floating point error is
// the same on each subdomain.
real arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gcc.isb;
real arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gcc.jsb;
real arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gcc.ksb;
/* Deal with floating point errors in position so we don't lose nodes */
  // Similar to bin_fill_{i,j,k}. If floor != round and round is "close enough"
  // to the nearest integer, use round instead. This ensures that all nodes are
  // accounted for between subdomains
// Using global indices makes sure that the floating point representation
// error is the same for each subdomain, since the magnitude of the index will
// be the similar/the same.
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
int round_x = lrint(arg_x);
int round_y = lrint(arg_y);
int round_z = lrint(arg_z);
// Better way to do this? no if-statement... abs?
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
  // Convert back to LOCAL coordinates
i -= _dom.Gcc.isb;
j -= _dom.Gcc.jsb;
k -= _dom.Gcc.ksb;
/* Interpolate Pressure */
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
oob = i < _dom.Gcc._is || i > _dom.Gcc._ie
|| j < _dom.Gcc._js || j > _dom.Gcc._je
|| k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gcc._is - i) * (i < _dom.Gcc._is);
j += (_dom.Gcc._js - j) * (j < _dom.Gcc._js);
k += (_dom.Gcc._ks - k) * (k < _dom.Gcc._ks);
i += (_dom.Gcc._ie - i) * (i > _dom.Gcc._ie);
j += (_dom.Gcc._je - j) * (j > _dom.Gcc._je);
k += (_dom.Gcc._ke - k) * (k > _dom.Gcc._ke);
// Cell-centered indices
C = GCC_LOC(i, j, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Ce = GCC_LOC(i + 1, j, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cw = GCC_LOC(i - 1, j, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cn = GCC_LOC(i, j + 1, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cs = GCC_LOC(i, j - 1, k, _dom.Gcc.s1b, _dom.Gcc.s2b);
Ct = GCC_LOC(i, j, k + 1, _dom.Gcc.s1b, _dom.Gcc.s2b);
Cb = GCC_LOC(i, j, k - 1, _dom.Gcc.s1b, _dom.Gcc.s2b);
// Cartesian location of center of cell
xx = (i - 0.5) * _dom.dx + _dom.xs;
yy = (j - 0.5) * _dom.dy + _dom.ys;
zz = (k - 0.5) * _dom.dz + _dom.zs;
  // perform linear (first-order Taylor) interpolation about the cell center
  // using central differences
real dpdx = 0.5*(p[Ce] - p[Cw]) * ddx;
real dpdy = 0.5*(p[Cn] - p[Cs]) * ddy;
real dpdz = 0.5*(p[Ct] - p[Cb]) * ddz;
pp[node + NNODES*part] = p[C] + dpdx*(x - xx) + dpdy*(y - yy) + dpdz*(z - zz);
  // set ppwall equal to the interfering wall pressure (currently disabled):
/* ppwall = (parts[part].nodes[node] == WEST_WALL_D || parts[part].nodes[node] == WEST_WALL_N)*p[Cw]
+ (parts[part].nodes[node] == EAST_WALL_D || parts[part].nodes[node] == EAST_WALL_N)*p[Ce]
+ (parts[part].nodes[node] == SOUTH_WALL_D || parts[part].nodes[node] == SOUTH_WALL_N)*p[Cs]
+ (parts[part].nodes[node] == NORTH_WALL_D || parts[part].nodes[node] == NORTH_WALL_N)*p[Cn]
+ (parts[part].nodes[node] == BOTTOM_WALL_D || parts[part].nodes[node] == BOTTOM_WALL_N)*p[Cb]
+ (parts[part].nodes[node] == TOP_WALL_D || parts[part].nodes[node] == TOP_WALL_N)*p[Ct];
*/
// switch to particle rest frame
real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp);
ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp);
ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp);
real bousiq_x = -s_beta*(parts[part].s - s_ref)*g.x;
real bousiq_y = -s_beta*(parts[part].s - s_ref)*g.y;
real bousiq_z = -s_beta*(parts[part].s - s_ref)*g.z;
real accdotr = (-gradP.x * irho_f - udot + bousiq_x)*xp +
(-gradP.y * irho_f - vdot + bousiq_y)*yp +
(-gradP.z * irho_f - wdot + bousiq_z)*zp;
pp[node + NNODES*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// ppwall -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// Zero if this node intersects wall or is out of bounds
pp[node + NNODES*part] = pp[node+part*NNODES] * (1 - oob) *
(parts[part].nodes[node] == -1);
// pp[node + NNODES*part] = ppwall * oob * (parts[part].nodes[node] < -1) +
// pp[node + NNODES*part] * (1 - oob) * (parts[part].nodes[node] == -1);
/* Interpolate Velocities */
// don't work with cell-center anymore; find closest cell face in x-direction
/* Interpolate u-velocity */
arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gfx.isb;
arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gfx.jsb;
arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gfx.ksb;
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
round_x = lrint(arg_x);
round_y = lrint(arg_y);
round_z = lrint(arg_z);
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
i -= _dom.Gfx.isb;
j -= _dom.Gfx.jsb;
k -= _dom.Gfx.ksb;
//i = round((x - _dom.xs) * ddx) + DOM_BUF;
//j = floor((y - _dom.ys) * ddy) + DOM_BUF;
//k = floor((z - _dom.zs) * ddz) + DOM_BUF;
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
// Use >= so domain is [start, end)
oobi = i < _dom.Gcc._is || i > _dom.Gcc._ie;
oobj = j < _dom.Gcc._js || j > _dom.Gcc._je;
oobk = k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gfx._is - i) * (i < _dom.Gfx._is);
j += (_dom.Gfx._js - j) * (j < _dom.Gfx._js);
k += (_dom.Gfx._ks - k) * (k < _dom.Gfx._ks);
i += (_dom.Gfx._ie - i) * (i >= _dom.Gfx._ie);
j += (_dom.Gfx._je - j) * (j > _dom.Gfx._je);
k += (_dom.Gfx._ke - k) * (k > _dom.Gfx._ke);
// Face-centered indices
C = GFX_LOC(i, j, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Ce = GFX_LOC(i + 1, j, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cw = GFX_LOC(i - 1, j, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cn = GFX_LOC(i, j + 1, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cs = GFX_LOC(i, j - 1, k, _dom.Gfx.s1b, _dom.Gfx.s2b);
Ct = GFX_LOC(i, j, k + 1, _dom.Gfx.s1b, _dom.Gfx.s2b);
Cb = GFX_LOC(i, j, k - 1, _dom.Gfx.s1b, _dom.Gfx.s2b);
// Cartesian location of face
xx = (i - DOM_BUF) * _dom.dx + _dom.xs;
yy = (j - 0.5) * _dom.dy + _dom.ys;
zz = (k - 0.5) * _dom.dz + _dom.zs;
  // Linear (first-order Taylor) interpolation about the face center
real dudx = 0.5*(u[Ce] - u[Cw]) * ddx;
real dudy = 0.5*(u[Cn] - u[Cs]) * ddy;
real dudz = 0.5*(u[Ct] - u[Cb]) * ddz;
uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz);
// set uuwall equal to interfering wall u-velocity
uuwalli = (parts[part].nodes[node] == WEST_WALL_D)*bc->uWD
+ (parts[part].nodes[node] == EAST_WALL_D)*bc->uED;
uuwallj = (parts[part].nodes[node] == SOUTH_WALL_D)*bc->uSD
+ (parts[part].nodes[node] == NORTH_WALL_D)*bc->uND;
uuwallk = (parts[part].nodes[node] == BOTTOM_WALL_D)*bc->uBD
+ (parts[part].nodes[node] == TOP_WALL_D)*bc->uTD;
// switch to particle rest frame
real ocrossr_x = oy*zp - oz*yp;
real odotcrossr_x = oydot*zp - ozdot*yp;
real tmp_u = parts[part].u + ocrossr_x +
0.1 * inu * (rs5 - a5) * irs3 * odotcrossr_x;
uu -= tmp_u;
uuwalli -= tmp_u;
uuwallj -= tmp_u;
uuwallk -= tmp_u;
// set actual node value based on whether it is interfered with
uu = (1-oobi) * (1-oobj) * (1-oobk) * (parts[part].nodes[node] == -1) * uu
+ oobi * (1-oobj) * (1-oobk) * (parts[part].nodes[node] < -1) * uuwalli
+ (1-oobi) * oobj * (1-oobk) * (parts[part].nodes[node] < -1) * uuwallj
+ (1-oobi) * (1-oobj) * oobk * (parts[part].nodes[node] < -1) * uuwallk;
/* interpolate v-velocity */
//i = floor((x - _dom.xs) * ddx) + DOM_BUF;
//j = round((y - _dom.ys) * ddy) + DOM_BUF;
//k = floor((z - _dom.zs) * ddz) + DOM_BUF;
arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gfy.isb;
arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gfy.jsb;
arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gfy.ksb;
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
round_x = lrint(arg_x);
round_y = lrint(arg_y);
round_z = lrint(arg_z);
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
i -= _dom.Gfy.isb;
j -= _dom.Gfy.jsb;
k -= _dom.Gfy.ksb;
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
oobi = i < _dom.Gcc._is || i > _dom.Gcc._ie;
oobj = j < _dom.Gcc._js || j > _dom.Gcc._je;
oobk = k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gfy._is - i) * (i < _dom.Gfy._is);
j += (_dom.Gfy._js - j) * (j < _dom.Gfy._js);
k += (_dom.Gfy._ks - k) * (k < _dom.Gfy._ks);
i += (_dom.Gfy._ie - i) * (i > _dom.Gfy._ie);
j += (_dom.Gfy._je - j) * (j >= _dom.Gfy._je);
k += (_dom.Gfy._ke - k) * (k > _dom.Gfy._ke);
// Face-centered indices
C = GFY_LOC(i, j, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Ce = GFY_LOC(i + 1, j, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cw = GFY_LOC(i - 1, j, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cn = GFY_LOC(i, j + 1, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cs = GFY_LOC(i, j - 1, k, _dom.Gfy.s1b, _dom.Gfy.s2b);
Ct = GFY_LOC(i, j, k + 1, _dom.Gfy.s1b, _dom.Gfy.s2b);
Cb = GFY_LOC(i, j, k - 1, _dom.Gfy.s1b, _dom.Gfy.s2b);
// Cartesian location of face
xx = (i-0.5) * _dom.dx + _dom.xs;
yy = (j-DOM_BUF) * _dom.dy + _dom.ys;
zz = (k-0.5) * _dom.dz + _dom.zs;
  // Linear (first-order Taylor) interpolation about the face center
real dvdx = 0.5*(v[Ce] - v[Cw]) * ddx;
real dvdy = 0.5*(v[Cn] - v[Cs]) * ddy;
real dvdz = 0.5*(v[Ct] - v[Cb]) * ddz;
vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz);
// set vvwall equal to interfering wall v-velocity
vvwalli = (parts[part].nodes[node] == WEST_WALL_D)*bc->vWD
+ (parts[part].nodes[node] == EAST_WALL_D)*bc->vED;
vvwallj = (parts[part].nodes[node] == SOUTH_WALL_D)*bc->vSD
+ (parts[part].nodes[node] == NORTH_WALL_D)*bc->vND;
vvwallk = (parts[part].nodes[node] == BOTTOM_WALL_D)*bc->vBD
+ (parts[part].nodes[node] == TOP_WALL_D)*bc->vTD;
// switch to particle rest frame
real ocrossr_y = -(ox*zp - oz*xp);
real odotcrossr_y = -(oxdot*zp - ozdot*xp);
real tmp_v = parts[part].v + ocrossr_y +
0.1 * inu * (rs5 - a5) * irs3 * odotcrossr_y;
vv -= tmp_v;
vvwalli -= tmp_v;
vvwallj -= tmp_v;
vvwallk -= tmp_v;
// set actual node value based on whether it is interfered with
vv = (1-oobi) * (1-oobj) * (1-oobk) * (parts[part].nodes[node] == -1) * vv
+ oobi * (1-oobj) * (1-oobk) * (parts[part].nodes[node] < -1) * vvwalli
+ (1-oobi) * oobj * (1-oobk) * (parts[part].nodes[node] < -1) * vvwallj
+ (1-oobi) * (1-oobj) * oobk * (parts[part].nodes[node] < -1) * vvwallk;
/* interpolate w-velocity */
arg_x = (x - (_dom.xs - _dom.dx)) * ddx + _dom.Gfz.isb;
arg_y = (y - (_dom.ys - _dom.dy)) * ddy + _dom.Gfz.jsb;
arg_z = (z - (_dom.zs - _dom.dz)) * ddz + _dom.Gfz.ksb;
i = floor(arg_x);
j = floor(arg_y);
k = floor(arg_z);
round_x = lrint(arg_x);
round_y = lrint(arg_y);
round_z = lrint(arg_z);
if ((round_x != i) && (abs(round_x - arg_x) <= DBL_EPSILON)) {
i = round_x;
}
if ((round_y != j) && (abs(round_y - arg_y) <= DBL_EPSILON)) {
j = round_y;
}
if ((round_z != k) && (abs(round_z - arg_z) <= DBL_EPSILON)) {
k = round_z;
}
i -= _dom.Gfz.isb;
j -= _dom.Gfz.jsb;
k -= _dom.Gfz.ksb;
//i = floor((x - _dom.xs) * ddx) + DOM_BUF;
//j = floor((y - _dom.ys) * ddy) + DOM_BUF;
//k = round((z - _dom.zs) * ddz) + DOM_BUF;
// Find if out-of-bounds -- 1 if oob, 0 if in bounds
oobi = i < _dom.Gcc._is || i > _dom.Gcc._ie;
oobj = j < _dom.Gcc._js || j > _dom.Gcc._je;
oobk = k < _dom.Gcc._ks || k > _dom.Gcc._ke;
// Correct indices so we don't have out-of-bounds reads
  // If out of bounds, we'll read good info but trash the results
i += (_dom.Gfz._is - i) * (i < _dom.Gfz._is);
j += (_dom.Gfz._js - j) * (j < _dom.Gfz._js);
k += (_dom.Gfz._ks - k) * (k < _dom.Gfz._ks);
i += (_dom.Gfz._ie - i) * (i > _dom.Gfz._ie);
j += (_dom.Gfz._je - j) * (j > _dom.Gfz._je);
k += (_dom.Gfz._ke - k) * (k >= _dom.Gfz._ke);
// Face-centered indices
C = GFZ_LOC(i, j, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Ce = GFZ_LOC(i + 1, j, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cw = GFZ_LOC(i - 1, j, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cn = GFZ_LOC(i, j + 1, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cs = GFZ_LOC(i, j - 1, k, _dom.Gfz.s1b, _dom.Gfz.s2b);
Ct = GFZ_LOC(i, j, k + 1, _dom.Gfz.s1b, _dom.Gfz.s2b);
Cb = GFZ_LOC(i, j, k - 1, _dom.Gfz.s1b, _dom.Gfz.s2b);
// Cartesian location of face
xx = (i-0.5) * _dom.dx + _dom.xs;
yy = (j-0.5) * _dom.dy + _dom.ys;
zz = (k-DOM_BUF) * _dom.dz + _dom.zs;
  // Linear (first-order Taylor) interpolation about the face center
real dwdx = 0.5*(w[Ce] - w[Cw]) * ddx;
real dwdy = 0.5*(w[Cn] - w[Cs]) * ddy;
real dwdz = 0.5*(w[Ct] - w[Cb]) * ddz;
ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz);
  // set wwwall equal to interfering wall w-velocity
wwwalli = (parts[part].nodes[node] == WEST_WALL_D)*bc->wWD
+ (parts[part].nodes[node] == EAST_WALL_D)*bc->wED;
wwwallj = (parts[part].nodes[node] == SOUTH_WALL_D)*bc->wSD
+ (parts[part].nodes[node] == NORTH_WALL_D)*bc->wND;
wwwallk = (parts[part].nodes[node] == BOTTOM_WALL_D)*bc->wBD
+ (parts[part].nodes[node] == TOP_WALL_D)*bc->wTD;
// switch to particle rest frame
real ocrossr_z = ox*yp - oy*xp;
real odotcrossr_z = oxdot*yp - oydot*xp;
real tmp_w = parts[part].w + ocrossr_z +
0.1 * inu * (rs5 - a5) * irs3 * odotcrossr_z;
ww -= tmp_w;
wwwalli -= tmp_w;
wwwallj -= tmp_w;
wwwallk -= tmp_w;
// set actual node value based on whether it is interfered with
ww = (1-oobi) * (1-oobj) * (1-oobk) * (parts[part].nodes[node] == -1) * ww
+ oobi * (1-oobj) * (1-oobk) * (parts[part].nodes[node] < -1) * wwwalli
+ (1-oobi) * oobj * (1-oobk) * (parts[part].nodes[node] < -1) * wwwallj
+ (1-oobi) * (1-oobj) * oobk * (parts[part].nodes[node] < -1) * wwwallk;
// convert (uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays
cart2sphere(uu, vv, ww, _node_t[node], _node_p[node],
&ur[node+part*NNODES], &ut[node+part*NNODES], &up[node+part*NNODES]);
}
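/* interpolate_nodes leaves, for every Lebedev node of every particle, the
 * pressure pp and the spherical velocity components (ur, ut, up) evaluated in
 * the particle rest frame; nodes flagged as wall-contaminated take the
 * prescribed Dirichlet wall velocity (shifted into the same frame) in place of
 * the interpolated fluid value, and their pressure is zeroed. */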
__global__ void lebedev_quadrature(part_struct *parts, int ncoeffs_max,
real *pp, real *ur, real *ut, real *up,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int node = threadIdx.x;
int part = blockIdx.x;
int coeff = blockIdx.y;
if (coeff < parts[part].ncoeff) {
/* Calculate integrand at each node */
int j = part*NNODES*ncoeffs_max + coeff*NNODES + node;
int n = _nn[coeff];
int m = _mm[coeff];
real theta = _node_t[node];
real phi = _node_p[node];
real N_nm = nnm(n, m);
real P_nm = pnm(n, m, theta);
real P_n1m = pnm(n + 1., m, theta);
real dPdt = (n - m + 1.)*P_n1m - (n + 1.)*cos(theta)*P_nm;
real dPdp = m*P_nm;
// Precalculate things we use more than once
real isth = 1./sin(theta);
real cmphi = cos(m * phi);
real smphi = sin(m * phi);
int stride = node + part*NNODES;
int_Yp_re[j] = N_nm*P_nm*pp[stride]*cmphi;
int_Yp_im[j] = -N_nm*P_nm*pp[stride]*smphi;
int_rDYu_re[j] = N_nm*isth*(dPdt * ut[stride] * cmphi
- dPdp * up[stride] * smphi);
int_rDYu_im[j] = N_nm*isth*(-dPdt * ut[stride] * smphi
- dPdp * up[stride] * cmphi);
int_xXDYu_re[j] = N_nm*isth*(dPdp * ut[stride] * smphi
+ dPdt * up[stride] * cmphi);
int_xXDYu_im[j] = N_nm*isth*(dPdp * ut[stride] * cmphi
- dPdt * up[stride] * smphi);
__syncthreads();
/* Compute partial sum of Lebedev quadrature (scalar product) */
// put sum into first node position for each coeff for each particle
if (node == 0) {
int_Yp_re[j] *= _A1;
int_Yp_im[j] *= _A1;
int_rDYu_re[j] *= _A1;
int_rDYu_im[j] *= _A1;
int_xXDYu_re[j] *= _A1;
int_xXDYu_im[j] *= _A1;
for (int i = 1; i < 6; i++) {
int_Yp_re[j] += _A1 * int_Yp_re[j+i];
int_Yp_im[j] += _A1 * int_Yp_im[j+i];
int_rDYu_re[j] += _A1 * int_rDYu_re[j+i];
int_rDYu_im[j] += _A1 * int_rDYu_im[j+i];
int_xXDYu_re[j] += _A1 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += _A1 * int_xXDYu_im[j+i];
}
for (int i = 6; i < 18; i++) {
int_Yp_re[j] += _A2 * int_Yp_re[j+i];
int_Yp_im[j] += _A2 * int_Yp_im[j+i];
int_rDYu_re[j] += _A2 * int_rDYu_re[j+i];
int_rDYu_im[j] += _A2 * int_rDYu_im[j+i];
int_xXDYu_re[j] += _A2 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += _A2 * int_xXDYu_im[j+i];
}
for (int i = 18; i < 26; i++) {
int_Yp_re[j] += _A3 * int_Yp_re[j+i];
int_Yp_im[j] += _A3 * int_Yp_im[j+i];
int_rDYu_re[j] += _A3 * int_rDYu_re[j+i];
int_rDYu_im[j] += _A3 * int_rDYu_im[j+i];
int_xXDYu_re[j] += _A3 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += _A3 * int_xXDYu_im[j+i];
}
} // if (node == 0)
}
}
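/* The weighted sums above form a 26-node Lebedev quadrature: weight _A1 for
 * the first 6 nodes, _A2 for the next 12 and _A3 for the final 8, with the
 * result accumulated into the node-0 slot of each (particle, coefficient)
 * stride -- the same slot the pack_sums_* kernels read. */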
__global__ void compute_lambs_coeffs(part_struct *parts, real relax,
real mu, real nu, int ncoeffs_max, int nparts,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int coeff = threadIdx.x;
int part = blockIdx.x;
// precalculate constants
real ars = parts[part].r / parts[part].rs;
real rsa = parts[part].rs / parts[part].r;
real r2 = parts[part].r * parts[part].r;
real inu = 1./nu;
real imunu = inu / mu;
if (coeff < parts[part].ncoeff && part < nparts) {
int j = part * NNODES * ncoeffs_max + coeff * NNODES + 0;
int n = _nn[coeff];
if (n == 0) {
parts[part].pnm_re[coeff] = (1. - relax) * parts[part].pnm_re0[coeff] +
relax * r2 * imunu * int_Yp_re[j];
parts[part].pnm_im[coeff] = (1. - relax) * parts[part].pnm_im0[coeff] +
relax * r2 * imunu * int_Yp_im[j];
parts[part].phinm_re[coeff] = 0.;
parts[part].phinm_im[coeff] = 0.;
parts[part].chinm_re[coeff] = 0.;
parts[part].chinm_im[coeff] = 0.;
} else { // n != 0
// Precalculate
real pow_ars_np1 = pow(ars, n + 1.); // ars^(n+1)
real pow_ars_2np1 = pow_ars_np1 * pow_ars_np1 * rsa; // ars^(2n+1)
real pow_rsa_nm1 = pow(rsa, n - 1.); // rsa^(n-1)
real pow_rsa_n = pow_rsa_nm1 * rsa; // rsa^n
real pow_rsa_np1 = pow_rsa_n * rsa; // rsa^(n+1)
real i_np1 = 1./(n + 1.);
real i_2np3 = 1./(2.*n + 3.);
// calculate p_nm and phi_nm
real A = (1. - 0.5*n*(2.*n - 1.) * i_np1 * pow_ars_2np1) * pow_rsa_n;
real B = n*(2.*n - 1.)*(2.*n + 1.) * i_np1*pow_ars_np1;
real C = 0.25*n*(2.*(n + 3.)*i_2np3
+ (n - 2. - n*(2.*n + 1.)*i_2np3*ars*ars)*pow_ars_2np1)*pow_rsa_np1;
real D = n*(n + 1. + 0.5*((n - 2.)*(2.*n + 1.)*rsa*rsa
- n*(2.*n - 1.))*pow_ars_2np1)*pow_rsa_nm1;
real idet = 1./ (A*D + B*C);
parts[part].pnm_re[coeff] = (r2*imunu*int_Yp_re[j]*D +
parts[part].r*inu*int_rDYu_re[j]*B) * idet;
parts[part].pnm_im[coeff] = (r2*imunu*int_Yp_im[j]*D +
parts[part].r*inu*int_rDYu_im[j]*B) * idet;
parts[part].phinm_re[coeff] = (parts[part].r*inu*int_rDYu_re[j]*A -
r2*imunu*int_Yp_re[j]*C) * idet;
parts[part].phinm_im[coeff] = (parts[part].r*inu*int_rDYu_im[j]*A -
r2*imunu*int_Yp_im[j]*C) * idet;
// calculate chi_nm
real E = n*(n + 1.)*(pow_ars_2np1 - 1.)*pow_rsa_n;
real iE = 1./ E;
parts[part].chinm_re[coeff] = parts[part].r*inu*int_xXDYu_re[j] * iE;
parts[part].chinm_im[coeff] = parts[part].r*inu*int_xXDYu_im[j] * iE;
// apply underrelaxation
parts[part].pnm_re[coeff] = parts[part].pnm_re0[coeff]*(1. - relax)
+ relax*parts[part].pnm_re[coeff];
parts[part].pnm_im[coeff] = parts[part].pnm_im0[coeff]*(1. - relax)
+ relax*parts[part].pnm_im[coeff];
parts[part].phinm_re[coeff] = parts[part].phinm_re0[coeff]*(1. - relax)
+ relax*parts[part].phinm_re[coeff];
parts[part].phinm_im[coeff] = parts[part].phinm_im0[coeff]*(1. - relax)
+ relax*parts[part].phinm_im[coeff];
parts[part].chinm_re[coeff] = parts[part].chinm_re0[coeff]*(1. - relax)
+ relax*parts[part].chinm_re[coeff];
parts[part].chinm_im[coeff] = parts[part].chinm_im0[coeff]*(1. - relax)
+ relax*parts[part].chinm_im[coeff];
}
}
}
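/* For n != 0, the p_nm/phi_nm update above is Cramer's rule applied to the
 * 2x2 system implied by the code,
 *   A*p_nm - B*phi_nm = (r*r/(mu*nu)) * <Y, p>
 *   C*p_nm + D*phi_nm = (r/nu)       * <r grad(Y), u>
 * with determinant A*D + B*C, while chi_nm comes from the single equation
 *   E*chi_nm = (r/nu) * <x cross grad(Y), u>,
 * the angle brackets denoting the Lebedev scalar products computed above. */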
__global__ void calc_forces(part_struct *parts, int nparts,
real gradPx, real gradPy, real gradPz, real rho_f, real mu, real nu,
real s_beta, real s_ref, g_struct g)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number
real irho_f = 1./ rho_f;
if(pp < nparts) {
real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r;
real N10 = sqrt(3./4./PI);
real N11 = sqrt(3./8./PI);
real bousiq_x = -s_beta*(parts[pp].s - s_ref)*g.x;
real bousiq_y = -s_beta*(parts[pp].s - s_ref)*g.y;
real bousiq_z = -s_beta*(parts[pp].s - s_ref)*g.z;
parts[pp].Fx = rho_f * vol * (parts[pp].udot + gradPx * irho_f - bousiq_x)
- PI * mu * nu * 2.*N11 * (parts[pp].pnm_re[2]
+ 6.*parts[pp].phinm_re[2]);
parts[pp].Fy = rho_f * vol * (parts[pp].vdot + gradPy * irho_f - bousiq_y)
+ PI * mu * nu * 2.*N11 * (parts[pp].pnm_im[2]
+ 6.*parts[pp].phinm_im[2]);
parts[pp].Fz = rho_f * vol * (parts[pp].wdot + gradPz * irho_f - bousiq_z)
+ PI * mu * nu * N10 * (parts[pp].pnm_re[1]
+ 6.*parts[pp].phinm_re[1]);
parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot
- 8. * PI * mu * nu * 2.*N11 * parts[pp].r * parts[pp].chinm_re[2];
parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot
+ 8. * PI * mu * nu * 2.*N11 * parts[pp].r * parts[pp].chinm_im[2];
parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot
+ 8. * PI * mu * nu * N10 * parts[pp].r * parts[pp].chinm_re[1];
}
}
__global__ void pack_sums_e(real *sum_send_e, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._ie; ti <= _bins.Gcc._ieb; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._ie) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_e[sp0] = int_Yp_re[psum_ind];
sum_send_e[sp1] = int_Yp_im[psum_ind];
sum_send_e[sp2] = int_rDYu_re[psum_ind];
sum_send_e[sp3] = int_rDYu_im[psum_ind];
sum_send_e[sp4] = int_xXDYu_re[psum_ind];
sum_send_e[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over ti planes
}
}
__global__ void pack_sums_w(real *sum_send_w, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._isb; ti <= _bins.Gcc._is; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._isb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_w[sp0] = int_Yp_re[psum_ind];
sum_send_w[sp1] = int_Yp_im[psum_ind];
sum_send_w[sp2] = int_rDYu_re[psum_ind];
sum_send_w[sp3] = int_rDYu_im[psum_ind];
sum_send_w[sp4] = int_xXDYu_re[psum_ind];
sum_send_w[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over ti
}
}
__global__ void pack_sums_n(real *sum_send_n, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._je; tj <= _bins.Gcc._jeb; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._je) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_n[sp0] = int_Yp_re[psum_ind];
//printf("N%d >> packing int_Yp_re[part %d, coeff %d (%d)] = %lf to sum_send_n[%d]\n",
// _dom.rank, pp, coeff, psum_ind, int_Yp_re[psum_ind], sp0);
sum_send_n[sp1] = int_Yp_im[psum_ind];
sum_send_n[sp2] = int_rDYu_re[psum_ind];
sum_send_n[sp3] = int_rDYu_im[psum_ind];
sum_send_n[sp4] = int_xXDYu_re[psum_ind];
sum_send_n[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tj planes
}
}
__global__ void pack_sums_s(real *sum_send_s, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._jsb; tj <= _bins.Gcc._js; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._jsb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_s[sp0] = int_Yp_re[psum_ind];
sum_send_s[sp1] = int_Yp_im[psum_ind];
sum_send_s[sp2] = int_rDYu_re[psum_ind];
sum_send_s[sp3] = int_rDYu_im[psum_ind];
sum_send_s[sp4] = int_xXDYu_re[psum_ind];
sum_send_s[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tj planes
}
}
__global__ void pack_sums_t(real *sum_send_t, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ke; tk <= _bins.Gcc._keb; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ke) * s2b;
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_t[sp0] = int_Yp_re[psum_ind];
sum_send_t[sp1] = int_Yp_im[psum_ind];
sum_send_t[sp2] = int_rDYu_re[psum_ind];
sum_send_t[sp3] = int_rDYu_im[psum_ind];
sum_send_t[sp4] = int_xXDYu_re[psum_ind];
sum_send_t[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tk planes
}
}
__global__ void pack_sums_b(real *sum_send_b, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ksb; tk <= _bins.Gcc._ks; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ksb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
sum_send_b[sp0] = int_Yp_re[psum_ind];
sum_send_b[sp1] = int_Yp_im[psum_ind];
sum_send_b[sp2] = int_rDYu_re[psum_ind];
sum_send_b[sp3] = int_rDYu_im[psum_ind];
sum_send_b[sp4] = int_xXDYu_re[psum_ind];
sum_send_b[sp5] = int_xXDYu_im[psum_ind];
}
}
} // loop over tk planes
}
}
__global__ void unpack_sums_e(real *sum_recv_e, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._ie; ti <= _bins.Gcc._ieb; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._ie) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_e[sp0];
int_Yp_im[psum_ind] += sum_recv_e[sp1];
int_rDYu_re[psum_ind] += sum_recv_e[sp2];
int_rDYu_im[psum_ind] += sum_recv_e[sp3];
int_xXDYu_re[psum_ind] += sum_recv_e[sp4];
int_xXDYu_im[psum_ind] += sum_recv_e[sp5];
}
}
} // loop over ti
}
}
__global__ void unpack_sums_w(real *sum_recv_w, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tj = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tk = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFX indices
int s1b = _bins.Gcc.jnb;
int s2b = s1b * _bins.Gcc.knb;
if (tj < _bins.Gcc.jnb && tk < _bins.Gcc.knb) {
for (int ti = _bins.Gcc._isb; ti <= _bins.Gcc._is; ti++) {
cbin = GFX_LOC(ti, tj, tk, s1b, s2b);
c2b = tj + tk * s1b + (ti - _bins.Gcc._isb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_w[sp0];
int_Yp_im[psum_ind] += sum_recv_w[sp1];
int_rDYu_re[psum_ind] += sum_recv_w[sp2];
int_rDYu_im[psum_ind] += sum_recv_w[sp3];
int_xXDYu_re[psum_ind] += sum_recv_w[sp4];
int_xXDYu_im[psum_ind] += sum_recv_w[sp5];
}
}
} // loop over ti
}
}
__global__ void unpack_sums_n(real *sum_recv_n, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._je; tj <= _bins.Gcc._jeb; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._je) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_n[sp0];
int_Yp_im[psum_ind] += sum_recv_n[sp1];
int_rDYu_re[psum_ind] += sum_recv_n[sp2];
int_rDYu_im[psum_ind] += sum_recv_n[sp3];
int_xXDYu_re[psum_ind] += sum_recv_n[sp4];
int_xXDYu_im[psum_ind] += sum_recv_n[sp5];
}
}
} // loop over tj
}
}
__global__ void unpack_sums_s(real *sum_recv_s, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int tk = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int ti = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFY indices
int s1b = _bins.Gcc.knb;
int s2b = s1b * _bins.Gcc.inb;
if (tk < _bins.Gcc.knb && ti < _bins.Gcc.inb) {
for (int tj = _bins.Gcc._jsb; tj <= _bins.Gcc._js; tj++) {
cbin = GFY_LOC(ti, tj, tk, s1b, s2b);
c2b = tk + ti * s1b + (tj - _bins.Gcc._jsb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_s[sp0];
          //printf("N%d >> unpacking int_Yp_re[part %d, coeff %d (%d)] = %lf from sum_recv_s[%d]\n",
          //  _dom.rank, pp, coeff, psum_ind, int_Yp_re[psum_ind], sp0);
int_Yp_im[psum_ind] += sum_recv_s[sp1];
int_rDYu_re[psum_ind] += sum_recv_s[sp2];
int_rDYu_im[psum_ind] += sum_recv_s[sp3];
int_xXDYu_re[psum_ind] += sum_recv_s[sp4];
int_xXDYu_im[psum_ind] += sum_recv_s[sp5];
}
}
} // loop over tj
}
}
__global__ void unpack_sums_t(real *sum_recv_t, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ke; tk <= _bins.Gcc._keb; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ke) * s2b;
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_t[sp0];
int_Yp_im[psum_ind] += sum_recv_t[sp1];
int_rDYu_re[psum_ind] += sum_recv_t[sp2];
int_rDYu_im[psum_ind] += sum_recv_t[sp3];
int_xXDYu_re[psum_ind] += sum_recv_t[sp4];
int_xXDYu_im[psum_ind] += sum_recv_t[sp5];
}
}
} // loop over tk
}
}
__global__ void unpack_sums_b(real *sum_recv_b, int *offset, int *bin_start,
int *bin_count, int *part_ind, int ncoeffs_max,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im)
{
int ti = blockIdx.x * blockDim.x + threadIdx.x; // bin index
int tj = blockIdx.y * blockDim.y + threadIdx.y;
int cbin; // bin index
int c2b; // bin index in 2-d plane
int pp; // particle index
int dest; // destination for particle partial sums in packed array
int sp0, sp1; // scalar product strides for (Ylm, p)
int sp2, sp3; // scalar product strides for (rDYlm, u)
int sp4, sp5; // scalar product strides for (x X DYlm, u)
int psum_ind; // index of partial sum in each scalar product
// Custom GFZ indices
int s1b = _bins.Gcc.inb;
int s2b = s1b * _bins.Gcc.jnb;
if (ti < _bins.Gcc.inb && tj < _bins.Gcc.jnb) {
for (int tk = _bins.Gcc._ksb; tk <= _bins.Gcc._ks; tk++) {
cbin = GFZ_LOC(ti, tj, tk, s1b, s2b);
c2b = ti + tj * s1b + (tk - _bins.Gcc._ksb) * s2b; // two planes
// Loop through each bin's particles
// Each bin is offset by offset[cbin] (from excl. prefix scan)
// Each particle is then offset from that
for (int i = 0; i < bin_count[cbin]; i++) {
pp = part_ind[bin_start[cbin] + i];
dest = offset[c2b] + i;
for (int coeff = 0; coeff < ncoeffs_max; coeff++) {
// Packing: part varies slowest, coeff varies quickest, sp middle
sp0 = coeff + ncoeffs_max*SP_YP_RE + ncoeffs_max*NSP*dest; // Yp_re
sp1 = coeff + ncoeffs_max*SP_YP_IM + ncoeffs_max*NSP*dest; // Yp_im
sp2 = coeff + ncoeffs_max*SP_RDYU_RE + ncoeffs_max*NSP*dest; // rDYu_re
sp3 = coeff + ncoeffs_max*SP_RDYU_IM + ncoeffs_max*NSP*dest; // rDYu_im
sp4 = coeff + ncoeffs_max*SP_XXDYU_RE + ncoeffs_max*NSP*dest; // xXDYu_re
sp5 = coeff + ncoeffs_max*SP_XXDYU_IM + ncoeffs_max*NSP*dest; // xXDYu_im
// Partial sums: part varies slowest, node quickest, coeff middle
// Partial sums are stored in index for node = 0
psum_ind = pp*NNODES*ncoeffs_max + coeff*NNODES;
int_Yp_re[psum_ind] += sum_recv_b[sp0];
int_Yp_im[psum_ind] += sum_recv_b[sp1];
int_rDYu_re[psum_ind] += sum_recv_b[sp2];
int_rDYu_im[psum_ind] += sum_recv_b[sp3];
int_xXDYu_re[psum_ind] += sum_recv_b[sp4];
int_xXDYu_im[psum_ind] += sum_recv_b[sp5];
}
}
} // loop over tk
}
}
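// Layout note (illustrative sketch only; the kernels above do not call this):
// every pack/unpack kernel indexes its send/recv buffer with the same formula,
// coefficient fastest, scalar-product slot middle, packed particle slot `dest`
// slowest. A hypothetical shared helper would look like
//   __device__ static int sp_idx(int coeff, int sp, int dest, int ncoeffs_max)
//   { return coeff + ncoeffs_max*sp + ncoeffs_max*NSP*dest; }
// so that, e.g., sp0 == sp_idx(coeff, SP_YP_RE, dest, ncoeffs_max).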
__global__ void compute_error(real lamb_cut, int ncoeffs_max, int nparts,
part_struct *parts, real *part_errors, int *part_nums)
{
int part = blockIdx.x;
int coeff = threadIdx.x;
real div = 0.;
real max = DBL_MIN;
__shared__ real s_coeffs[MAX_COEFFS * NSP];
__shared__ real s_coeffs0[MAX_COEFFS * NSP];
__shared__ real s_max[MAX_COEFFS];
if (part < nparts && coeff < ncoeffs_max) {
s_coeffs[coeff + ncoeffs_max * 0] = parts[part].pnm_re[coeff];
s_coeffs[coeff + ncoeffs_max * 1] = parts[part].pnm_im[coeff];
s_coeffs[coeff + ncoeffs_max * 2] = parts[part].phinm_re[coeff];
s_coeffs[coeff + ncoeffs_max * 3] = parts[part].phinm_im[coeff];
s_coeffs[coeff + ncoeffs_max * 4] = parts[part].chinm_re[coeff];
s_coeffs[coeff + ncoeffs_max * 5] = parts[part].chinm_im[coeff];
s_coeffs0[coeff + ncoeffs_max * 0] = parts[part].pnm_re0[coeff];
s_coeffs0[coeff + ncoeffs_max * 1] = parts[part].pnm_im0[coeff];
s_coeffs0[coeff + ncoeffs_max * 2] = parts[part].phinm_re0[coeff];
s_coeffs0[coeff + ncoeffs_max * 3] = parts[part].phinm_im0[coeff];
s_coeffs0[coeff + ncoeffs_max * 4] = parts[part].chinm_re0[coeff];
s_coeffs0[coeff + ncoeffs_max * 5] = parts[part].chinm_im0[coeff];
s_max[coeff] = DBL_MIN;
__syncthreads();
// If coefficient has a large enough magnitude (relative to 0th order coeff)
// calculate the error
for (int i = 0; i < NSP; i++) {
int c = coeff + ncoeffs_max * i;
// Determine if current coefficient has large enough value compared to 0th
// (also, make sure it's large enough so we don't get issues with close-to-zero
// errors)
// (also, if zeroth order is 0, ignore)
real curr_val = s_coeffs[c];
real zeroth_val = s_coeffs[0 + ncoeffs_max * i];
int flag = (fabs(curr_val) > fabs(lamb_cut*zeroth_val)) *
(fabs(curr_val) > 1.e-16) *
(fabs(zeroth_val) > DBL_MIN);
// If flag == 1, set scoeff equal to error value
// If flag == 0, set scoeff equal to zero (no error)
div = fabs(curr_val);
div += (1.e-16 - div) * (div < 1.e-16);
real curr_val0 = s_coeffs0[c];
s_coeffs[c] = (real) flag * fabs(curr_val - curr_val0) / div;
// See if current error is the max we've seen so far over all the
// coefficients of a given order, set if so
s_max[coeff] += (s_coeffs[c] - s_max[coeff]) * (s_coeffs[c] > s_max[coeff]);
}
__syncthreads();
    // We've now calculated the error for each "large enough" coefficient and
// found the maximum over all coefficients of a given order. Now, each
// order has a maximum, and we need to find the max over these
if (coeff == 0) {
for (int i = 0; i < ncoeffs_max; i++) {
max += (s_max[i] - max) * (s_max[i] > max);
}
part_errors[part] = max;
part_nums[part] = parts[part].N;
}
}
}
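// Readability note (hypothetical branching equivalent of the branchless updates
// in compute_error above):
//   if (flag) err = fabs(curr_val - curr_val0) / fmax(fabs(curr_val), 1.e-16);
//   else      err = 0.;
//   s_max[coeff] = fmax(s_max[coeff], err);
// The kernel keeps the multiply-by-flag form to avoid warp divergence; the
// intended numerical result is the same.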
__global__ void store_coeffs(part_struct *parts, int nparts,
int ncoeffs_max)
{
int part = blockIdx.x;
int coeff = threadIdx.x;
if (part < nparts && coeff < ncoeffs_max) {
parts[part].pnm_re0[coeff] = parts[part].pnm_re[coeff];
parts[part].pnm_im0[coeff] = parts[part].pnm_im[coeff];
parts[part].phinm_re0[coeff] = parts[part].phinm_re[coeff];
parts[part].phinm_im0[coeff] = parts[part].phinm_im[coeff];
parts[part].chinm_re0[coeff] = parts[part].chinm_re[coeff];
parts[part].chinm_im0[coeff] = parts[part].chinm_im[coeff];
}
}
|
e20fd812bbce54dc56b719832b392cd2cc569223.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __COLSUMSKERNEL__CU__
#define __COLSUMSKERNEL__CU__
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <hip/driver_types.h>
#include <iostream>
#include "config.h"
#include <boost/format.hpp>
using boost::format;
using std::cout;
//
//unsigned int roundToPowerOf2(unsigned int v){
// v--;
// v |= v >> 1;
// v |= v >> 2;
// v |= v >> 4;
// v |= v >> 8;
// v |= v >> 16;
// v++;
// return v;
//}
//
//__device__ void PrintDevice(float * d_a, int M, int N)
//{
//
// for(int i = 0; i < M * N; i++){
// printf("%f\t", d_a[i]);
//
// if((i + 1) % N == 0){
// printf("\n");
// }
// }
// printf("\n");
//}
template <class T, unsigned int BLOCK_SIZE>
__global__ void colSumsKernel(T * in, T * out, int M, int N){
__shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE];
int row = blockDim.y * blockIdx.y + threadIdx.y ;//+ x;
int column = blockDim.x * blockIdx.x + threadIdx.x;// + y;
int ty = threadIdx.y, tx = threadIdx.x;
sdata[ty][tx] = row < M && column < N ? in[row * N + column]: 0;
__syncthreads();
for(unsigned int s = BLOCK_SIZE / 2; s > 0; s >>= 1){
if(ty < s){
sdata[ty][tx] += sdata[ty+s][tx];
}
__syncthreads();
}
if(ty == 0 && column < N)
out[blockIdx.y * N + column] = sdata[0][tx];
}
template <class T, unsigned int BLOCK_SIZE>
__global__ void colSumsKernel2(T * in, T * out, int M, int N){
__shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE];
unsigned int gridHeight = blockDim.y * gridDim.y;
unsigned int gridWidth = blockDim.x * gridDim.x;
for(int x = 0; x < N; x += gridWidth){
for(int y = 0; y < M; y += gridHeight){
int row = blockDim.y * blockIdx.y + threadIdx.y + y;
int column = blockDim.x * blockIdx.x + threadIdx.x + x;
if(row == 0 && column == 0){
// printf("h: %d, w: %d\n", gridHeight, gridWidth);
}
int ty = threadIdx.y, tx = threadIdx.x;
sdata[ty][tx] = row < M && column < N ? in[row * N + column]: 0;
__syncthreads();
for(unsigned int s = BLOCK_SIZE / 2; s > 0; s >>= 1){
if(ty < s){
if(row == 3 && column == 4){
printf("adding sdata[%d][%d]: %f to sdata[%d][%d]: %f = %f\n",
ty, tx, sdata[ty][tx],
ty + s, tx, sdata[ty + s][tx],
sdata[ty][tx] + sdata[ty+s][tx]);
}
sdata[ty][tx] += sdata[ty+s][tx];
__syncthreads();
}
}
// __syncthreads();
if(threadIdx.y == 0 && column < N) {
if(row == 3 && column == 4){
                        printf("writing out[%d] = %f\n\n",
(row / BLOCK_SIZE) * N + column,
sdata[0][threadIdx.x]);
}
out[(row / BLOCK_SIZE) * N + column] = sdata[0][threadIdx.x];
}
}
}
}
template <class T>
void colSums(T * in, T * out, int M, int N, int version){
if(version != 1 && version != 2)
version = 1;
int i =0;
while( ceil(M / (float)THREADS_PER_BLOCK) > 1){
dim3 grid(ceil(N / (float)THREADS_PER_BLOCK), ceil(M/(float)(THREADS_PER_BLOCK)), 1);
dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
int tpb = THREADS_PER_BLOCK;
if(version == 1)
hipLaunchKernelGGL(( colSumsKernel<T, THREADS_PER_BLOCK>), dim3(grid), dim3(block), 0, 0, in, in, M, N);
else if(version == 2) {
printf("Launching Kernel with grid: %d * %d, blockdim: %d * %d, M: %d, N: %d\n",
(int)ceil(N / (float) (THREADS_PER_BLOCK * 4)), (int)ceil(M / (float) (THREADS_PER_BLOCK * 4)),
tpb, tpb, M, N);
            hipLaunchKernelGGL(HIP_KERNEL_NAME(colSumsKernel2<T, THREADS_PER_BLOCK>),
                               dim3(ceil(N / (float) (THREADS_PER_BLOCK * 4)),
                                    ceil(M / (float) (THREADS_PER_BLOCK * 4)), 1),
                               dim3(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1),
                               0, 0, in, in, M, N);
}
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("\033[1;31m colSumsKernel launch failed with error \"%s\". \033[0m\n",
hipGetErrorString(cudaerr));
//* swap in and tmp
M = ceil(M/(float)(THREADS_PER_BLOCK));
}
if(version == 1)
        hipLaunchKernelGGL(HIP_KERNEL_NAME(colSumsKernel<T, THREADS_PER_BLOCK>),
                           dim3(ceil(N / (float) THREADS_PER_BLOCK),
                                ceil(M / (float) THREADS_PER_BLOCK), 1),
                           dim3(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1),
                           0, 0, in, out, M, N);
else if(version == 2) {
printf("Launching Kernel with grid: %d * %d, blockdim: %d * %d, M: %d, N: %d\n",
(int)ceil(N / (float) (THREADS_PER_BLOCK * 4)), (int)ceil(M / (float) (THREADS_PER_BLOCK * 4)),
THREADS_PER_BLOCK, THREADS_PER_BLOCK, M, N);
        hipLaunchKernelGGL(HIP_KERNEL_NAME(colSumsKernel2<T, THREADS_PER_BLOCK>),
                           dim3(ceil(N / (float) (THREADS_PER_BLOCK * 4)),
                                ceil(M / (float) (THREADS_PER_BLOCK * 4)), 1),
                           dim3(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1),
                           0, 0, in, out, M, N);
}
// colSumsKernel<T, THREADS_PER_BLOCK><<<dim3{(int)ceil(N/(float)THREADS_PER_BLOCK),
// ceil(M/(float)THREADS_PER_BLOCK), 1},
// dim3{THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1}>>>(in, out, M, N);
// M = ceil(M/(float)THREADS_PER_BLOCK);
// hipFree(tmp1Backup);
// hipFree(tmp2Backup);
//first launch kernel to perform initial reduce
// if(ceil(M/(float)THREADS_PER_BLOCK) > 1){
//
// T * tmp;
// hipMalloc((void **) &tmp, roundToPowerOf2(ceil(M/(float)(THREADS_PER_BLOCK))) * N * sizeof(T));
//
// hipLaunchKernelGGL(( colSumsKernel<T, THREADS_PER_BLOCK>), dim3(grid), dim3(block), 0, 0, in, tmp, M, N);
//
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("\033[1;31m colSumsKernel launch failed with error \"%s\". \033[0m\n",
hipGetErrorString(cudaerr));
}
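// Usage sketch (hypothetical device pointers; assumes d_in holds M*N values and
// d_out was allocated with hipMalloc large enough for the reduced result):
//   colSums<float>(d_in, d_out, M, N, /*version=*/1);
//   hipDeviceSynchronize();
// Note: the reduction loop above works in place through `in`, so the contents
// of d_in are overwritten.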
template void
colSums<int>(int * input, int * out, int M, int N, int threadsPerBlock);
template void
colSums<float>(float * input, float * out, int M, int N, int threadsPerBlock);
template void
colSums<double>(double * input, double * out, int M, int N, int threadsPerBlock);
#endif | e20fd812bbce54dc56b719832b392cd2cc569223.cu | #ifndef __COLSUMSKERNEL__CU__
#define __COLSUMSKERNEL__CU__
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <driver_types.h>
#include <iostream>
#include "config.h"
#include <boost/format.hpp>
using boost::format;
using std::cout;
//
//unsigned int roundToPowerOf2(unsigned int v){
// v--;
// v |= v >> 1;
// v |= v >> 2;
// v |= v >> 4;
// v |= v >> 8;
// v |= v >> 16;
// v++;
// return v;
//}
//
//__device__ void PrintDevice(float * d_a, int M, int N)
//{
//
// for(int i = 0; i < M * N; i++){
// printf("%f\t", d_a[i]);
//
// if((i + 1) % N == 0){
// printf("\n");
// }
// }
// printf("\n");
//}
template <class T, unsigned int BLOCK_SIZE>
__global__ void colSumsKernel(T * in, T * out, int M, int N){
__shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE];
int row = blockDim.y * blockIdx.y + threadIdx.y ;//+ x;
int column = blockDim.x * blockIdx.x + threadIdx.x;// + y;
int ty = threadIdx.y, tx = threadIdx.x;
sdata[ty][tx] = row < M && column < N ? in[row * N + column]: 0;
__syncthreads();
for(unsigned int s = BLOCK_SIZE / 2; s > 0; s >>= 1){
if(ty < s){
sdata[ty][tx] += sdata[ty+s][tx];
}
__syncthreads();
}
if(ty == 0 && column < N)
out[blockIdx.y * N + column] = sdata[0][tx];
}
template <class T, unsigned int BLOCK_SIZE>
__global__ void colSumsKernel2(T * in, T * out, int M, int N){
__shared__ T sdata[BLOCK_SIZE][BLOCK_SIZE];
unsigned int gridHeight = blockDim.y * gridDim.y;
unsigned int gridWidth = blockDim.x * gridDim.x;
for(int x = 0; x < N; x += gridWidth){
for(int y = 0; y < M; y += gridHeight){
int row = blockDim.y * blockIdx.y + threadIdx.y + y;
int column = blockDim.x * blockIdx.x + threadIdx.x + x;
if(row == 0 && column == 0){
// printf("h: %d, w: %d\n", gridHeight, gridWidth);
}
int ty = threadIdx.y, tx = threadIdx.x;
sdata[ty][tx] = row < M && column < N ? in[row * N + column]: 0;
__syncthreads();
for(unsigned int s = BLOCK_SIZE / 2; s > 0; s >>= 1){
if(ty < s){
if(row == 3 && column == 4){
printf("adding sdata[%d][%d]: %f to sdata[%d][%d]: %f = %f\n",
ty, tx, sdata[ty][tx],
ty + s, tx, sdata[ty + s][tx],
sdata[ty][tx] + sdata[ty+s][tx]);
}
sdata[ty][tx] += sdata[ty+s][tx];
__syncthreads();
}
}
// __syncthreads();
if(threadIdx.y == 0 && column < N) {
if(row == 3 && column == 4){
                        printf("writing out[%d] = %f\n\n",
(row / BLOCK_SIZE) * N + column,
sdata[0][threadIdx.x]);
}
out[(row / BLOCK_SIZE) * N + column] = sdata[0][threadIdx.x];
}
}
}
}
template <class T>
void colSums(T * in, T * out, int M, int N, int version){
if(version != 1 && version != 2)
version = 1;
int i =0;
while( ceil(M / (float)THREADS_PER_BLOCK) > 1){
dim3 grid(ceil(N / (float)THREADS_PER_BLOCK), ceil(M/(float)(THREADS_PER_BLOCK)), 1);
dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
int tpb = THREADS_PER_BLOCK;
if(version == 1)
colSumsKernel<T, THREADS_PER_BLOCK><<<grid, block>>>(in, in, M, N);
else if(version == 2) {
printf("Launching Kernel with grid: %d * %d, blockdim: %d * %d, M: %d, N: %d\n",
(int)ceil(N / (float) (THREADS_PER_BLOCK * 4)), (int)ceil(M / (float) (THREADS_PER_BLOCK * 4)),
tpb, tpb, M, N);
            colSumsKernel2<T, THREADS_PER_BLOCK><<<dim3(ceil(N / (float) (THREADS_PER_BLOCK * 4)),
                                                        ceil(M / (float) (THREADS_PER_BLOCK * 4)), 1),
                                                   dim3(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1)>>>(in, in, M, N);
}
cudaError_t cudaerr = cudaDeviceSynchronize();
            if (cudaerr != cudaSuccess)
printf("\033[1;31m colSumsKernel launch failed with error \"%s\". \033[0m\n",
cudaGetErrorString(cudaerr));
//* swap in and tmp
M = ceil(M/(float)(THREADS_PER_BLOCK));
}
if(version == 1)
        colSumsKernel<T, THREADS_PER_BLOCK><<<dim3(ceil(N / (float) THREADS_PER_BLOCK),
                                                   ceil(M / (float)THREADS_PER_BLOCK), 1),
                      dim3(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1)>>>(in, out, M, N);
else if(version == 2) {
printf("Launching Kernel with grid: %d * %d, blockdim: %d * %d, M: %d, N: %d\n",
(int)ceil(N / (float) (THREADS_PER_BLOCK * 4)), (int)ceil(M / (float) (THREADS_PER_BLOCK * 4)),
THREADS_PER_BLOCK, THREADS_PER_BLOCK, M, N);
        colSumsKernel2<T, THREADS_PER_BLOCK><<<dim3(ceil(N / (float) (THREADS_PER_BLOCK * 4)),
                                                    ceil(M / (float) (THREADS_PER_BLOCK * 4)), 1),
                                               dim3(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1)>>>(in, out, M, N);
}
// colSumsKernel<T, THREADS_PER_BLOCK><<<dim3{(int)ceil(N/(float)THREADS_PER_BLOCK),
// ceil(M/(float)THREADS_PER_BLOCK), 1},
// dim3{THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1}>>>(in, out, M, N);
// M = ceil(M/(float)THREADS_PER_BLOCK);
// cudaFree(tmp1Backup);
// cudaFree(tmp2Backup);
//first launch kernel to perform initial reduce
// if(ceil(M/(float)THREADS_PER_BLOCK) > 1){
//
// T * tmp;
// cudaMalloc((void **) &tmp, roundToPowerOf2(ceil(M/(float)(THREADS_PER_BLOCK))) * N * sizeof(T));
//
// colSumsKernel<T, THREADS_PER_BLOCK><<<grid, block>>>(in, tmp, M, N);
//
cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
printf("\033[1;31m colSumsKernel launch failed with error \"%s\". \033[0m\n",
cudaGetErrorString(cudaerr));
}
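// Usage sketch (hypothetical device pointers; assumes d_in holds M*N values and
// d_out was allocated with cudaMalloc large enough for the reduced result):
//   colSums<float>(d_in, d_out, M, N, /*version=*/1);
//   cudaDeviceSynchronize();
// Note: the reduction loop above works in place through `in`, so the contents
// of d_in are overwritten.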
template void
colSums<int>(int * input, int * out, int M, int N, int threadsPerBlock);
template void
colSums<float>(float * input, float * out, int M, int N, int threadsPerBlock);
template void
colSums<double>(double * input, double * out, int M, int N, int threadsPerBlock);
#endif |
cd7818659c0bc91a18d1c7ab5ec99efb82247908.hip | // !!! This is a file automatically generated by hipify!!!
// ==============================================
// Veltman Lina group 407
// ==============================================
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <chrono>
#include <string.h>
#include "mpi.h"
#include <omp.h>
using namespace std::chrono;
#define CSC(call) do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(res)); \
fflush(stderr); \
exit(0); \
} \
} while (0) \
typedef unsigned char uchar;
struct vec3
{
double x;
double y;
double z;
};
struct Triangle
{
vec3 a;
vec3 b;
vec3 c;
uchar4 color;
};
// dot product
__device__ __host__
double dot(vec3 a, vec3 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
// multiply a vector by a scalar
__device__ __host__
vec3 mulc(vec3 a, double c)
{
return { c * a.x, c * a.y, c * a.z };
}
// cross product
__device__ __host__
vec3 prod(vec3 a, vec3 b)
{
return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x };
}
// normalized vector
__device__ __host__
vec3 norm(vec3 v)
{
    // vector norm
double l = sqrt(dot(v, v));
return { v.x / l, v.y / l, v.z / l };
}
__device__ __host__
double len(vec3 v)
{
return sqrt(dot(v, v));
}
// vector subtraction (their difference)
__device__ __host__
vec3 diff(vec3 a, vec3 b)
{
return { a.x - b.x, a.y - b.y, a.z - b.z };
}
// vector addition
__device__ __host__
vec3 add(vec3 a, vec3 b)
{
return { a.x + b.x, a.y + b.y, a.z + b.z };
}
// multiply the matrix composed of the vectors a, b, c by the vector v
__device__ __host__
vec3 mult(vec3 a, vec3 b, vec3 c, vec3 v)
{
return {
a.x * v.x + b.x * v.y + c.x * v.z,
a.y * v.x + b.y * v.y + c.y * v.z,
a.z * v.x + b.z * v.y + c.z * v.z
};
}
void print(vec3 v)
{
printf("%e %e %e\n", v.x, v.y, v.z);
}
__host__ __device__
double dmin(double x, double y)
{
if (x > y) { return y; }
return x;
}
// build the scene
void BuildStage(Triangle* t, double r1, vec3 o1, uchar4 c1,
double r2, vec3 o2, uchar4 c2,
double r3, vec3 o3, uchar4 c3,
vec3* fv, uchar4 fc)
{
int st = 0;
double p = (1 + sqrt(5)) / 2;
    // icosahedron vertex coordinates
vec3 icosVertexes[] = {
add(o1, norm(vec3{ 0, -1, p})),
add(o1, norm(vec3{ 0, 1, p})),
add(o1, norm(vec3{-p, 0, 1})),
add(o1, norm(vec3{ p, 0, 1})),
add(o1, norm(vec3{-1, p, 0})),
add(o1, norm(vec3{ 1, p, 0})),
add(o1, norm(vec3{ 1, -p, 0})),
add(o1, norm(vec3{-1, -p, 0})),
add(o1, norm(vec3{-p, 0, -1})),
add(o1, norm(vec3{ p, 0, -1})),
add(o1, norm(vec3{ 0, -1, -p})),
add(o1, norm(vec3{ 0, 1, -p}))
};
    // add the faces to the scene array
t[st++] = Triangle{ icosVertexes[0], icosVertexes[1], icosVertexes[2] , c1 };
t[st++] = Triangle{ icosVertexes[1], icosVertexes[0], icosVertexes[3] , c1 };
t[st++] = Triangle{ icosVertexes[0], icosVertexes[2], icosVertexes[7] , c1 };
t[st++] = Triangle{ icosVertexes[2], icosVertexes[1], icosVertexes[4] , c1 };
t[st++] = Triangle{ icosVertexes[4], icosVertexes[1], icosVertexes[5] , c1 };
t[st++] = Triangle{ icosVertexes[6], icosVertexes[0], icosVertexes[7] , c1 };
t[st++] = Triangle{ icosVertexes[3], icosVertexes[0], icosVertexes[6] , c1 };
t[st++] = Triangle{ icosVertexes[1], icosVertexes[3], icosVertexes[5] , c1 };
t[st++] = Triangle{ icosVertexes[4], icosVertexes[5], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[6], icosVertexes[7], icosVertexes[10], c1 };
t[st++] = Triangle{ icosVertexes[3], icosVertexes[6], icosVertexes[9] , c1 };
t[st++] = Triangle{ icosVertexes[5], icosVertexes[3], icosVertexes[9] , c1 };
t[st++] = Triangle{ icosVertexes[7], icosVertexes[2], icosVertexes[8] , c1 };
t[st++] = Triangle{ icosVertexes[2], icosVertexes[4], icosVertexes[8] , c1 };
t[st++] = Triangle{ icosVertexes[9], icosVertexes[10], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[10], icosVertexes[8], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[5], icosVertexes[9], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[9], icosVertexes[6], icosVertexes[10], c1 };
t[st++] = Triangle{ icosVertexes[7], icosVertexes[8], icosVertexes[10], c1 };
t[st++] = Triangle{ icosVertexes[8], icosVertexes[4], icosVertexes[11], c1 };
    // hexahedron (cube) vertex coordinates
vec3 hexVertexes[] = {
add(o2, norm(vec3{-1, -1, -1})),
add(o2, norm(vec3{-1, 1, -1})),
add(o2, norm(vec3{ 1, -1, -1})),
add(o2, norm(vec3{ 1, 1, -1})),
add(o2, norm(vec3{-1, -1, 1})),
add(o2, norm(vec3{-1, 1, 1})),
add(o2, norm(vec3{ 1, -1, 1})),
add(o2, norm(vec3{ 1, 1, 1})),
};
    // add the faces to the scene array
t[st++] = Triangle{ hexVertexes[5], hexVertexes[4], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[4], hexVertexes[6], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[0], hexVertexes[1], hexVertexes[3], c2 };
t[st++] = Triangle{ hexVertexes[2], hexVertexes[0], hexVertexes[3], c2 };
t[st++] = Triangle{ hexVertexes[3], hexVertexes[1], hexVertexes[5], c2 };
t[st++] = Triangle{ hexVertexes[3], hexVertexes[5], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[0], hexVertexes[2], hexVertexes[4], c2 };
t[st++] = Triangle{ hexVertexes[4], hexVertexes[2], hexVertexes[6], c2 };
t[st++] = Triangle{ hexVertexes[1], hexVertexes[0], hexVertexes[5], c2 };
t[st++] = Triangle{ hexVertexes[0], hexVertexes[4], hexVertexes[5], c2 };
t[st++] = Triangle{ hexVertexes[2], hexVertexes[3], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[6], hexVertexes[2], hexVertexes[7], c2 };
    // octahedron vertex coordinates
vec3 octVertexes[] = {
add(o3, norm(vec3{ 0, 0, -1})),
add(o3, norm(vec3{ 0, 0, 1})),
add(o3, norm(vec3{-1, 0, 0})),
add(o3, norm(vec3{ 1, 0, 0})),
add(o3, norm(vec3{ 0, -1, 0})),
add(o3, norm(vec3{ 0, 1, 0})),
};
    // add the faces to the scene array
t[st++] = Triangle{ octVertexes[0], octVertexes[4], octVertexes[2], c3 };
t[st++] = Triangle{ octVertexes[0], octVertexes[2], octVertexes[5], c3 };
t[st++] = Triangle{ octVertexes[0], octVertexes[5], octVertexes[3], c3 };
t[st++] = Triangle{ octVertexes[0], octVertexes[3], octVertexes[4], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[2], octVertexes[4], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[5], octVertexes[2], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[3], octVertexes[5], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[4], octVertexes[3], c3 };
    // add the floor
t[st++] = Triangle{ fv[0], fv[2], fv[1], fc };
t[st++] = Triangle{ fv[1], fv[2], fv[3], fc };
}
// pos - point the ray starts from
// dir - direction the ray travels in
// returns (via i and t) the nearest intersected triangle and the hit distance
__host__ __device__
void RayTracing(Triangle* triangles, vec3 pos, vec3 dir, int* i, double* t)
{
    // look for the intersection closest to us, hence the minimum
    // over our ray and the scene triangles
int k, k_min = -1;
double ts_min = 0;
    for (k = 0; k < 42; ++k) // iterate over the polygons and look for an intersection
{
vec3 e1 = diff(triangles[k].b, triangles[k].a);
vec3 e2 = diff(triangles[k].c, triangles[k].a);
vec3 p = prod(dir, e2);
double div = dot(p, e1);
if (fabs(div) < 1e-10)
            continue; // definitely no intersection
vec3 t = diff(pos, triangles[k].a);
double u = dot(p, t) / div;
if (u < 0.0 || u > 1.0)
            continue; // the ray does not hit this triangle
vec3 q = prod(t, e1);
double v = dot(q, dir) / div;
if (v < 0.0 || v + u > 1.0)
continue;
double ts = dot(q, e2) / div;
if (ts < 0.0)
            continue; // intersection found, but it lies behind the camera
if (k_min == -1 || ts < ts_min)
{
k_min = k;
ts_min = ts;
}
}
*i = k_min;
*t = ts_min;
}
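// Usage sketch (hypothetical variables): trace one primary ray and shade it.
//   int hit; double t;
//   RayTracing(triangles, cameraPos, rayDir, &hit, &t);
//   if (hit != -1) { vec3 p = add(cameraPos, mulc(rayDir, t)); /* shade p */ }
// This is the Moller-Trumbore ray/triangle intersection test applied to all 42
// scene triangles, keeping the closest hit in front of the camera.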
// compute the basis
// compute the vectors
// transform them into that basis
void Render(Triangle* triangles, vec3 pc, vec3 pv,
int w, int h, double angle, uchar4* data,
vec3 lightPosition, uchar4 lightColor)
{
double dw = 2.0 / (w - 1.0);
double dh = 2.0 / (h - 1.0);
double z = 1.0 / tan(angle * M_PI / 360.0);
    // basis associated with the camera
vec3 bz = norm(diff(pv, pc));
vec3 bx = norm(prod(bz, { 0.0, 0.0, 1.0 }));
vec3 by = norm(prod(bx, bz));
int size = w * h;
int kmin;
double tmin;
#pragma omp parallel
{
        int threadQuantity = omp_get_num_threads(); // number of threads
        int threadId = omp_get_thread_num(); // this thread's index
for (int k = threadId; k < size; k += threadQuantity)
{
int i = k % w;
int j = k / w;
vec3 v = { -1.0 + dw * i, (-1.0 + dh * j) * h / w, z };
            // ray direction
vec3 dir = norm(mult(bx, by, bz, v));
            // find the polygon the ray hits first,
            // and the distance to it
RayTracing(triangles, pc, dir, &kmin, &tmin);
if (kmin != -1)
{
                // we hit something, so compute the colour
                // taking the light source into account
double rr = (double)triangles[kmin].color.x / 255.0;
double gg = (double)triangles[kmin].color.y / 255.0;
double bb = (double)triangles[kmin].color.z / 255.0;
double ri = 0.2, gi = 0.2, bi = 0.2;
                // p - intersection point
vec3 p = add(pc, mulc(dir, tmin));
vec3 l = diff(lightPosition, p);
vec3 n = prod(diff(triangles[kmin].b, triangles[kmin].a),
diff(triangles[kmin].c, triangles[kmin].a));
double dot_nl = dot(n, l);
if (dot_nl > 0)
{
ri += (lightColor.x / 255.0) * dot_nl / (len(n) * len(l));
gi += (lightColor.y / 255.0) * dot_nl / (len(n) * len(l));
bi += (lightColor.z / 255.0) * dot_nl / (len(n) * len(l));
}
data[(h - 1 - j) * w + i].x = (uchar)(255 * dmin(1.0, ri * rr));
data[(h - 1 - j) * w + i].y = (uchar)(255 * dmin(1.0, gi * gg));
data[(h - 1 - j) * w + i].z = (uchar)(255 * dmin(1.0, bi * bb));
}
else
{
                // no intersection - paint the background (black)
data[(h - 1 - j) * w + i] = uchar4{ 0, 0, 0, 0 };
}
}
}
}
__global__
void DeviceRender(Triangle* triangles, vec3 pc, vec3 pv,
int w, int h, double angle, uchar4* data,
vec3 lightPosition, uchar4 lightColor)
{
double pi = acos(-1.0);
int i, j;
    double dw = 2.0 / (w - 1.0); // step along the width
    double dh = 2.0 / (h - 1.0); // step along the height
    double z = 1.0 / tan(angle * pi / 360.0); // distance to the image plane, from the field-of-view angle
    // basis associated with the camera
vec3 bz = norm(diff(pv, pc));
vec3 bx = norm(prod(bz, { 0.0, 0.0, 1.0 }));
vec3 by = norm(prod(bx, bz));
int kmin;
double tmin;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ofs = blockDim.x * gridDim.x;
while (tid < w * h)
{
i = tid % w;
j = tid / w;
tid += ofs;
        // image-plane point for pixel x, y
vec3 v = { -1.0 + dw * i, (-1.0 + dh * j) * h / w, z };
        // ray direction
vec3 dir = norm(mult(bx, by, bz, v));
        // find the nearest intersection of the ray with the scene
RayTracing(triangles, pc, dir, &kmin, &tmin);
if (kmin != -1)
{
            // we hit something, so compute the colour
            // taking the light source into account
double rr = (double)triangles[kmin].color.x / 255.0;
double gg = (double)triangles[kmin].color.y / 255.0;
double bb = (double)triangles[kmin].color.z / 255.0;
double ri = 0.2, gi = 0.2, bi = 0.2;
            // p - intersection point
vec3 p = add(pc, mulc(dir, tmin));
            vec3 l = diff(lightPosition, p); // l - vector from the hit point towards the light
vec3 n = prod(diff(triangles[kmin].b, triangles[kmin].a),
diff(triangles[kmin].c, triangles[kmin].a));
double dot_nl = dot(n, l);
if (dot_nl > 0)
{
ri += (lightColor.x / 255.0) * dot_nl / (len(n) * len(l));
gi += (lightColor.y / 255.0) * dot_nl / (len(n) * len(l));
bi += (lightColor.z / 255.0) * dot_nl / (len(n) * len(l));
}
data[(h - 1 - j) * w + i].x = (uchar)(255 * dmin(1.0, ri * rr));
data[(h - 1 - j) * w + i].y = (uchar)(255 * dmin(1.0, gi * gg));
data[(h - 1 - j) * w + i].z = (uchar)(255 * dmin(1.0, bi * bb));
}
else
{
            // no intersection - paint the background (black)
data[(h - 1 - j) * w + i] = uchar4{ 0, 0, 0, 0 };
}
}
}
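// Design note: DeviceRender walks the image with a grid-stride loop, so any
// launch geometry covers all w*h pixels; the dim3(128) grid and block used in
// main() below are just one reasonable choice, not a requirement.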
// camera position on its trajectory
// returns the x y z coordinates at time t
vec3 CoordCameraFromTime(double r0c, double z0c, double p0c,
double arc, double azc,
double wrc, double wzc, double wpc,
double prc, double pzc, double t)
{
double r = r0c + arc * sin(wrc * t + prc);
double z = z0c + azc * sin(wzc * t + pzc);
double phi = p0c + wpc * t;
return vec3{ r * cos(phi), r * sin(phi), z };
};
// view-point (look-at) position on its trajectory
// returns the x y z coordinates at time t
vec3 CoordViewPointFromTime(double r0n, double z0n, double p0n,
double arn, double azn,
double wrn, double wzn, double wpn,
double prn, double pzn, double t)
{
double r = r0n + arn * sin(wrn * t + prn);
double z = z0n + azn * sin(wzn * t + pzn);
double phi = p0n + wpn * t;
return vec3{ r * cos(phi), r * sin(phi), z };
};
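// Worked example (assuming the amplitudes arc and azc are zero): at t = 0 the
// camera from CoordCameraFromTime sits at (r0c*cos(p0c), r0c*sin(p0c), z0c) and
// then circles the z axis with angular velocity wpc as t grows.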
// bcast helpers: read a value on one process and broadcast it to the others
//     process 0 (the root) reads the value from stdin
//     and broadcasts it to every process
// mpi_comm_world - the communicator containing all processes
void MpiReader(int rank, int& input)
{
if (!rank)
{
std::cin >> input;
}
MPI_Bcast(&input, 1, MPI_INT, 0, MPI_COMM_WORLD);
}
void MpiReader(int rank, double& input)
{
if (!rank)
{
std::cin >> input;
}
MPI_Bcast(&input, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
}
void MpiReader(int rank, std::string& input)
{
int sizeFilename;
if (!rank)
{
std::cin >> input;
sizeFilename = input.size();
}
MPI_Bcast(&sizeFilename, 1, MPI_INT, 0, MPI_COMM_WORLD);
input.resize(sizeFilename);
MPI_Bcast(const_cast<char*>(input.c_str()), sizeFilename, MPI_CHAR, 0, MPI_COMM_WORLD);
}
int main(int argc, char* argv[])
{
int deviceSelection = 0;
if (argc >= 3)
{
printf("argc error\n");
return -1;
}
if (argc == 1)
{
deviceSelection = 1;
}
else if (strcmp(argv[1], "--default") == 0)
{
printf("400 \n");
printf("img_%% d.data \n");
printf("1240 960 100 \n");
printf("7.0 3.0 0.0 2.0 1.0 2.0 6.0 1.0 0.0 0.0 \n");
printf("2.0 0.0 0.0 0.5 0.1 1.0 4.0 1.0 0.0 0.0 \n");
printf("-2 -2 0 2 200 0 0 \n");
printf("-2 2 0 2 0 255 0 \n");
printf("2 0 0 2 0 0 255 \n");
printf("-4 -4 -1 -4 4 -1 4 -4 -1 4 4 -1 102 62 0 \n");
return 0;
}
else if (strcmp(argv[1], "--gpu") == 0)
{
deviceSelection = 1;
}
else if (strcmp(argv[1], "--cpu") == 0)
{
deviceSelection = 0;
}
int procRank, numberOfProcs;
MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numberOfProcs); // number of processes
    MPI_Comm_rank(MPI_COMM_WORLD, &procRank); // rank of this process
    int deviceCnt;
    hipGetDeviceCount(&deviceCnt); // number of available GPUs
    hipSetDevice(procRank % deviceCnt); // map this MPI process to a GPU
int n, w, h;
double a = 100;
std::string path;
    // motion parameters of the camera (suffix c)
    // and of the view point (suffix n)
double r0c, z0c, p0c, arc, azc, wrc, wzc, wpc, prc, pzc,
r0n, z0n, p0n, arn, azn, wrn, wzn, wpn, prn, pzn;
double r1 = 2, r2 = 2, r3 = 2;
vec3 o1 = { -2, -2, 0 };
vec3 o2 = { -2, 2, 0 };
vec3 o3 = { 2, 0, 0 };
int c1x, c1y, c1z;
uchar4 c1 = { 200, 0, 0 };
int c2x, c2y, c2z;
uchar4 c2 = { 0, 255, 0 };
int c3x, c3y, c3z;
uchar4 c3 = { 0, 0, 255 };
vec3 fv[4];
fv[0] = { -5, -5, -3 };
fv[1] = { -5, 5, -3 };
fv[2] = { 5, -5, -3 };
fv[3] = { 5, 5, -3 };
int fcx, fcy, fcz;
uchar4 fc = { 102, 62, 0 };
    // read all the input parameters :)
    MpiReader(procRank, n); // number of frames
    MpiReader(procRank, path); // output file name pattern
    // frame size and field-of-view angle
    MpiReader(procRank, w); MpiReader(procRank, h); MpiReader(procRank, a);
    // camera and view-point motion parameters
MpiReader(procRank, r0c); MpiReader(procRank, z0c); MpiReader(procRank, p0c); MpiReader(procRank, arc); MpiReader(procRank, azc); MpiReader(procRank, wrc); MpiReader(procRank, wzc); MpiReader(procRank, wpc); MpiReader(procRank, prc); MpiReader(procRank, pzc);
MpiReader(procRank, r0n); MpiReader(procRank, z0n); MpiReader(procRank, p0n); MpiReader(procRank, arn); MpiReader(procRank, azn); MpiReader(procRank, wrn); MpiReader(procRank, wzn); MpiReader(procRank, wpn); MpiReader(procRank, prn); MpiReader(procRank, pzn);
MpiReader(procRank, o1.x); MpiReader(procRank, o1.y); MpiReader(procRank, o1.z); MpiReader(procRank, r1); MpiReader(procRank, c1x); MpiReader(procRank, c1y); MpiReader(procRank, c1z);
MpiReader(procRank, o2.x); MpiReader(procRank, o2.y); MpiReader(procRank, o2.z); MpiReader(procRank, r2); MpiReader(procRank, c2x); MpiReader(procRank, c2y); MpiReader(procRank, c2z);
MpiReader(procRank, o3.x); MpiReader(procRank, o3.y); MpiReader(procRank, o3.z); MpiReader(procRank, r3); MpiReader(procRank, c3x); MpiReader(procRank, c3y); MpiReader(procRank, c3z);
MpiReader(procRank, fv[0].x); MpiReader(procRank, fv[0].y); MpiReader(procRank, fv[0].z); MpiReader(procRank, fv[1].x); MpiReader(procRank, fv[1].y); MpiReader(procRank, fv[1].z);
MpiReader(procRank, fv[2].x); MpiReader(procRank, fv[2].y); MpiReader(procRank, fv[2].z); MpiReader(procRank, fv[3].x); MpiReader(procRank, fv[3].y); MpiReader(procRank, fv[3].z);
MpiReader(procRank, fcx); MpiReader(procRank, fcy); MpiReader(procRank, fcz);
c1.x = c1x;
c1.y = c1y;
c1.z = c1z;
c2.x = c2x;
c2.y = c2y;
c2.z = c2z;
c3.x = c3x;
c3.y = c3y;
c3.z = c3z;
fc.x = fcx;
fc.y = fcy;
fc.z = fcz;
char buff[256];
    // frame buffer (the output image)
    uchar4* data = (uchar4*)malloc(sizeof(uchar4) * w * h);
    vec3 pc, pv;
    // the scene geometry
    Triangle triangles[42];
    BuildStage(triangles, r1, o1, c1, r2, o2, c2, r3, o3, c3, fv, fc);
    // time step between frames
    double dt = 2 * M_PI / (double)n;
    // light source position
    vec3 lightPosition = { -2, 0, 4 };
    // light source colour
uchar4 lightColor = { 255, 255, 255 };
//float sharedTime = 0;
double timeStart;
if (!procRank)
{
timeStart = MPI_Wtime();
}
if (deviceSelection == 0)
{
double cpuTime;
for (int k = procRank; k < n; k += numberOfProcs)
{
pc = CoordCameraFromTime(r0c, z0c, p0c, arc, azc, wrc, wzc, wpc, prc, pzc, k * dt);
pv = CoordViewPointFromTime(r0n, z0n, p0n, arn, azn, wrn, wzn, wpn, prn, pzn, k * dt);
auto start = steady_clock::now();
            // render the frame on the CPU
Render(triangles, pc, pv, w, h, a, data, lightPosition, lightColor);
auto end = steady_clock::now();
cpuTime = ((double)duration_cast<microseconds>(end - start).count()) / 1000.0;
            // write the frame to disk
sprintf(buff, path.c_str(), k);
printf("%d: %s %e ms\n", k, buff, cpuTime);
//sharedTime += cpuTime;
FILE* out = fopen(buff, "wb");
fwrite(&w, sizeof(int), 1, out);
fwrite(&h, sizeof(int), 1, out);
fwrite(data, sizeof(uchar4), w * h, out);
fclose(out);
}
//printf("All time: %e ms\n", sharedTime);
}
else
{
float deviceTime = 0;
hipEvent_t start, stop;
CSC(hipEventCreate(&start));
CSC(hipEventCreate(&stop));
Triangle* deviceTriangles;
        // copy the scene geometry to the device
CSC(hipMalloc((void**)(&deviceTriangles), 42 * sizeof(Triangle)));
CSC(hipMemcpy(deviceTriangles, triangles, 42 * sizeof(Triangle), hipMemcpyHostToDevice));
uchar4* deviceData;
CSC(hipMalloc((void**)(&deviceData), w * h * sizeof(uchar4)));
for (int k = procRank; k < n; k += numberOfProcs)
{
pc = CoordCameraFromTime(r0c, z0c, p0c, arc, azc, wrc, wzc, wpc, prc, pzc, k * dt);
pv = CoordViewPointFromTime(r0n, z0n, p0n, arn, azn, wrn, wzn, wpn, prn, pzn, k * dt);
CSC(hipEventRecord(start));
hipLaunchKernelGGL(( DeviceRender), dim3(128), dim3(128), 0, 0, deviceTriangles, pc, pv, w, h, a, deviceData, lightPosition, lightColor);
CSC(hipGetLastError());
CSC(hipEventRecord(stop));
CSC(hipEventSynchronize(stop));
CSC(hipEventElapsedTime(&deviceTime, start, stop));
CSC(hipMemcpy(data, deviceData, w * h * sizeof(uchar4), hipMemcpyDeviceToHost));
sprintf(buff, path.c_str(), k);
printf("%d: %s %e ms\n", k, buff, deviceTime);
//sharedTime += deviceTime;
FILE* out = fopen(buff, "wb");
fwrite(&w, sizeof(int), 1, out);
fwrite(&h, sizeof(int), 1, out);
fwrite(data, sizeof(uchar4), w * h, out);
fclose(out);
}
//printf("All time: %e ms\n", sharedTime);
CSC(hipEventDestroy(start));
CSC(hipEventDestroy(stop));
CSC(hipFree(deviceTriangles));
CSC(hipFree(deviceData));
}
free(data);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
double timeEnd;
if (!procRank)
{
timeEnd = MPI_Wtime();
std::cout << "TIME: ";
std::cout << (timeEnd - timeStart) * 1000.0 << "ms" << std::endl;
}
return 0;
} | cd7818659c0bc91a18d1c7ab5ec99efb82247908.cu | // ==============================================
// Veltman Lina group 407
// ==============================================
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <string.h>
#include "mpi.h"
#include <omp.h>
using namespace std::chrono;
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
fflush(stderr); \
exit(0); \
} \
} while (0) \
typedef unsigned char uchar;
struct vec3
{
double x;
double y;
double z;
};
struct Triangle
{
vec3 a;
vec3 b;
vec3 c;
uchar4 color;
};
// dot product
__device__ __host__
double dot(vec3 a, vec3 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
// multiply a vector by a scalar
__device__ __host__
vec3 mulc(vec3 a, double c)
{
return { c * a.x, c * a.y, c * a.z };
}
// cross product
__device__ __host__
vec3 prod(vec3 a, vec3 b)
{
return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x };
}
// normalized vector
__device__ __host__
vec3 norm(vec3 v)
{
    // vector norm
double l = sqrt(dot(v, v));
return { v.x / l, v.y / l, v.z / l };
}
__device__ __host__
double len(vec3 v)
{
return sqrt(dot(v, v));
}
// vector subtraction (their difference)
__device__ __host__
vec3 diff(vec3 a, vec3 b)
{
return { a.x - b.x, a.y - b.y, a.z - b.z };
}
// vector addition
__device__ __host__
vec3 add(vec3 a, vec3 b)
{
return { a.x + b.x, a.y + b.y, a.z + b.z };
}
// multiply the matrix composed of the vectors a, b, c by the vector v
__device__ __host__
vec3 mult(vec3 a, vec3 b, vec3 c, vec3 v)
{
return {
a.x * v.x + b.x * v.y + c.x * v.z,
a.y * v.x + b.y * v.y + c.y * v.z,
a.z * v.x + b.z * v.y + c.z * v.z
};
}
void print(vec3 v)
{
printf("%e %e %e\n", v.x, v.y, v.z);
}
__host__ __device__
double dmin(double x, double y)
{
if (x > y) { return y; }
return x;
}
// build the scene
void BuildStage(Triangle* t, double r1, vec3 o1, uchar4 c1,
double r2, vec3 o2, uchar4 c2,
double r3, vec3 o3, uchar4 c3,
vec3* fv, uchar4 fc)
{
int st = 0;
double p = (1 + sqrt(5)) / 2;
    // icosahedron vertex coordinates
vec3 icosVertexes[] = {
add(o1, norm(vec3{ 0, -1, p})),
add(o1, norm(vec3{ 0, 1, p})),
add(o1, norm(vec3{-p, 0, 1})),
add(o1, norm(vec3{ p, 0, 1})),
add(o1, norm(vec3{-1, p, 0})),
add(o1, norm(vec3{ 1, p, 0})),
add(o1, norm(vec3{ 1, -p, 0})),
add(o1, norm(vec3{-1, -p, 0})),
add(o1, norm(vec3{-p, 0, -1})),
add(o1, norm(vec3{ p, 0, -1})),
add(o1, norm(vec3{ 0, -1, -p})),
add(o1, norm(vec3{ 0, 1, -p}))
};
    // add the faces to the scene array
t[st++] = Triangle{ icosVertexes[0], icosVertexes[1], icosVertexes[2] , c1 };
t[st++] = Triangle{ icosVertexes[1], icosVertexes[0], icosVertexes[3] , c1 };
t[st++] = Triangle{ icosVertexes[0], icosVertexes[2], icosVertexes[7] , c1 };
t[st++] = Triangle{ icosVertexes[2], icosVertexes[1], icosVertexes[4] , c1 };
t[st++] = Triangle{ icosVertexes[4], icosVertexes[1], icosVertexes[5] , c1 };
t[st++] = Triangle{ icosVertexes[6], icosVertexes[0], icosVertexes[7] , c1 };
t[st++] = Triangle{ icosVertexes[3], icosVertexes[0], icosVertexes[6] , c1 };
t[st++] = Triangle{ icosVertexes[1], icosVertexes[3], icosVertexes[5] , c1 };
t[st++] = Triangle{ icosVertexes[4], icosVertexes[5], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[6], icosVertexes[7], icosVertexes[10], c1 };
t[st++] = Triangle{ icosVertexes[3], icosVertexes[6], icosVertexes[9] , c1 };
t[st++] = Triangle{ icosVertexes[5], icosVertexes[3], icosVertexes[9] , c1 };
t[st++] = Triangle{ icosVertexes[7], icosVertexes[2], icosVertexes[8] , c1 };
t[st++] = Triangle{ icosVertexes[2], icosVertexes[4], icosVertexes[8] , c1 };
t[st++] = Triangle{ icosVertexes[9], icosVertexes[10], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[10], icosVertexes[8], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[5], icosVertexes[9], icosVertexes[11], c1 };
t[st++] = Triangle{ icosVertexes[9], icosVertexes[6], icosVertexes[10], c1 };
t[st++] = Triangle{ icosVertexes[7], icosVertexes[8], icosVertexes[10], c1 };
t[st++] = Triangle{ icosVertexes[8], icosVertexes[4], icosVertexes[11], c1 };
	// hexahedron (cube) vertex coordinates
vec3 hexVertexes[] = {
add(o2, norm(vec3{-1, -1, -1})),
add(o2, norm(vec3{-1, 1, -1})),
add(o2, norm(vec3{ 1, -1, -1})),
add(o2, norm(vec3{ 1, 1, -1})),
add(o2, norm(vec3{-1, -1, 1})),
add(o2, norm(vec3{-1, 1, 1})),
add(o2, norm(vec3{ 1, -1, 1})),
add(o2, norm(vec3{ 1, 1, 1})),
};
	// add the faces to the scene array
t[st++] = Triangle{ hexVertexes[5], hexVertexes[4], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[4], hexVertexes[6], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[0], hexVertexes[1], hexVertexes[3], c2 };
t[st++] = Triangle{ hexVertexes[2], hexVertexes[0], hexVertexes[3], c2 };
t[st++] = Triangle{ hexVertexes[3], hexVertexes[1], hexVertexes[5], c2 };
t[st++] = Triangle{ hexVertexes[3], hexVertexes[5], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[0], hexVertexes[2], hexVertexes[4], c2 };
t[st++] = Triangle{ hexVertexes[4], hexVertexes[2], hexVertexes[6], c2 };
t[st++] = Triangle{ hexVertexes[1], hexVertexes[0], hexVertexes[5], c2 };
t[st++] = Triangle{ hexVertexes[0], hexVertexes[4], hexVertexes[5], c2 };
t[st++] = Triangle{ hexVertexes[2], hexVertexes[3], hexVertexes[7], c2 };
t[st++] = Triangle{ hexVertexes[6], hexVertexes[2], hexVertexes[7], c2 };
	// octahedron vertex coordinates
vec3 octVertexes[] = {
add(o3, norm(vec3{ 0, 0, -1})),
add(o3, norm(vec3{ 0, 0, 1})),
add(o3, norm(vec3{-1, 0, 0})),
add(o3, norm(vec3{ 1, 0, 0})),
add(o3, norm(vec3{ 0, -1, 0})),
add(o3, norm(vec3{ 0, 1, 0})),
};
	// add the faces to the scene array
t[st++] = Triangle{ octVertexes[0], octVertexes[4], octVertexes[2], c3 };
t[st++] = Triangle{ octVertexes[0], octVertexes[2], octVertexes[5], c3 };
t[st++] = Triangle{ octVertexes[0], octVertexes[5], octVertexes[3], c3 };
t[st++] = Triangle{ octVertexes[0], octVertexes[3], octVertexes[4], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[2], octVertexes[4], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[5], octVertexes[2], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[3], octVertexes[5], c3 };
t[st++] = Triangle{ octVertexes[1], octVertexes[4], octVertexes[3], c3 };
	// add the floor (two triangles)
t[st++] = Triangle{ fv[0], fv[2], fv[1], fc };
t[st++] = Triangle{ fv[1], fv[2], fv[3], fc };
}
// pos - the position the ray starts from
// dir - the direction the ray is cast in
// returns, via i and t, the index of the nearest intersected triangle and the distance to the hit
__host__ __device__
void RayTracing(Triangle* triangles, vec3 pos, vec3 dir, int* i, double* t)
{
	// we want the intersection closest to the ray origin, hence the minimum
	// over all ray/triangle intersections with the scene
int k, k_min = -1;
double ts_min = 0;
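	// The loop below is the Moller-Trumbore ray/triangle intersection test: with edges
	// e1 = b - a and e2 = c - a it solves  pos + ts*dir = a + u*e1 + v*e2  for (ts, u, v)
	// using scalar triple products. The hit lies inside the triangle iff u >= 0, v >= 0
	// and u + v <= 1, and ts is the distance to the hit along the ray direction.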
	for (k = 0; k < 42; ++k) // iterate over the scene polygons looking for an intersection
{
vec3 e1 = diff(triangles[k].b, triangles[k].a);
vec3 e2 = diff(triangles[k].c, triangles[k].a);
vec3 p = prod(dir, e2);
double div = dot(p, e1);
if (fabs(div) < 1e-10)
			continue; // the ray is (almost) parallel to the triangle plane - no intersection
vec3 t = diff(pos, triangles[k].a);
double u = dot(p, t) / div;
if (u < 0.0 || u > 1.0)
			continue; // the intersection point lies outside the triangle
vec3 q = prod(t, e1);
double v = dot(q, dir) / div;
if (v < 0.0 || v + u > 1.0)
continue;
double ts = dot(q, e2) / div;
if (ts < 0.0)
			continue; // an intersection exists, but it lies behind the ray origin (the camera)
if (k_min == -1 || ts < ts_min)
{
k_min = k;
ts_min = ts;
}
}
*i = k_min;
*t = ts_min;
}
// build the camera basis
// build a ray for every pixel
// transform the rays into that basis
void Render(Triangle* triangles, vec3 pc, vec3 pv,
int w, int h, double angle, uchar4* data,
vec3 lightPosition, uchar4 lightColor)
{
double dw = 2.0 / (w - 1.0);
double dh = 2.0 / (h - 1.0);
double z = 1.0 / tan(angle * M_PI / 360.0);
	// camera-space basis
vec3 bz = norm(diff(pv, pc));
vec3 bx = norm(prod(bz, { 0.0, 0.0, 1.0 }));
vec3 by = norm(prod(bx, bz));
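	// note: bz looks from the camera towards the view point, bx and by complete an
	// orthonormal basis using the world up direction (0, 0, 1); z = 1 / tan(fov / 2) is
	// the distance to the image plane, so the [-1, 1] x [-1, 1] square on that plane
	// spans the requested field of view (angle is given in degrees, hence angle * pi / 360).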
int size = w * h;
int kmin;
double tmin;
	#pragma omp parallel private(kmin, tmin) // kmin and tmin must be private per thread to avoid a data race
{
		int threadQuantity = omp_get_num_threads(); // number of threads spawned in the current parallel region
		int threadId = omp_get_thread_num(); // index of the current thread
for (int k = threadId; k < size; k += threadQuantity)
{
int i = k % w;
int j = k / w;
vec3 v = { -1.0 + dw * i, (-1.0 + dh * j) * h / w, z };
			// transform the constructed ray into the camera basis
vec3 dir = norm(mult(bx, by, bz, v));
			// the ray is built, now trace it into the scene
			// and shade the pixel it was cast from
RayTracing(triangles, pc, dir, &kmin, &tmin);
if (kmin != -1)
{
				// found an intersection, so the surface color is known
				// compute the intensity of the diffuse component
double rr = (double)triangles[kmin].color.x / 255.0;
double gg = (double)triangles[kmin].color.y / 255.0;
double bb = (double)triangles[kmin].color.z / 255.0;
double ri = 0.2, gi = 0.2, bi = 0.2;
				// p - the intersection point of the ray with the nearest scene triangle
vec3 p = add(pc, mulc(dir, tmin));
vec3 l = diff(lightPosition, p);
vec3 n = prod(diff(triangles[kmin].b, triangles[kmin].a),
diff(triangles[kmin].c, triangles[kmin].a));
double dot_nl = dot(n, l);
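				// Lambertian diffuse term: dot(n, l) / (|n| * |l|) is the cosine of the
				// angle between the face normal and the light direction; the 0.2 above
				// acts as a constant ambient term.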
if (dot_nl > 0)
{
ri += (lightColor.x / 255.0) * dot_nl / (len(n) * len(l));
gi += (lightColor.y / 255.0) * dot_nl / (len(n) * len(l));
bi += (lightColor.z / 255.0) * dot_nl / (len(n) * len(l));
}
data[(h - 1 - j) * w + i].x = (uchar)(255 * dmin(1.0, ri * rr));
data[(h - 1 - j) * w + i].y = (uchar)(255 * dmin(1.0, gi * gg));
data[(h - 1 - j) * w + i].z = (uchar)(255 * dmin(1.0, bi * bb));
}
else
{
				// no intersection - black pixel
data[(h - 1 - j) * w + i] = uchar4{ 0, 0, 0, 0 };
}
}
}
}
__global__
void DeviceRender(Triangle* triangles, vec3 pc, vec3 pv,
int w, int h, double angle, uchar4* data,
vec3 lightPosition, uchar4 lightColor)
{
double pi = acos(-1.0);
int i, j;
	double dw = 2.0 / (w - 1.0); // horizontal step on the image plane
	double dh = 2.0 / (h - 1.0); // vertical step on the image plane
	double z = 1.0 / tan(angle * pi / 360.0); // depth of the image plane the rays are cast through
	// camera-space basis at the camera position
vec3 bz = norm(diff(pv, pc));
vec3 bx = norm(prod(bz, { 0.0, 0.0, 1.0 }));
vec3 by = norm(prod(bx, bz));
int kmin;
double tmin;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ofs = blockDim.x * gridDim.x;
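	// grid-stride loop: thread tid handles pixels tid, tid + ofs, tid + 2*ofs, ...,
	// so any launch configuration covers the whole w*h image.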
while (tid < w * h)
{
i = tid % w;
j = tid / w;
tid += ofs;
		// build the ray vector in the image plane (x, y) at depth z
vec3 v = { -1.0 + dw * i, (-1.0 + dh * j) * h / w, z };
		// transform the constructed ray into the camera basis
vec3 dir = norm(mult(bx, by, bz, v));
		// trace the ray and shade the pixel it was cast from
RayTracing(triangles, pc, dir, &kmin, &tmin);
if (kmin != -1)
{
			// found an intersection, so the surface color is known
			// compute the intensity of the diffuse component
double rr = (double)triangles[kmin].color.x / 255.0;
double gg = (double)triangles[kmin].color.y / 255.0;
double bb = (double)triangles[kmin].color.z / 255.0;
double ri = 0.2, gi = 0.2, bi = 0.2;
			// p - the intersection point of the ray with the nearest scene triangle
vec3 p = add(pc, mulc(dir, tmin));
			vec3 l = diff(lightPosition, p); // direction to the light: the vector from the hit point to the light source position
vec3 n = prod(diff(triangles[kmin].b, triangles[kmin].a),
diff(triangles[kmin].c, triangles[kmin].a));
double dot_nl = dot(n, l);
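			// Lambertian diffuse term: dot(n, l) / (|n| * |l|) is the cosine of the
			// angle between the face normal and the light direction; 0.2 is the ambient term.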
if (dot_nl > 0)
{
ri += (lightColor.x / 255.0) * dot_nl / (len(n) * len(l));
gi += (lightColor.y / 255.0) * dot_nl / (len(n) * len(l));
bi += (lightColor.z / 255.0) * dot_nl / (len(n) * len(l));
}
data[(h - 1 - j) * w + i].x = (uchar)(255 * dmin(1.0, ri * rr));
data[(h - 1 - j) * w + i].y = (uchar)(255 * dmin(1.0, gi * gg));
data[(h - 1 - j) * w + i].z = (uchar)(255 * dmin(1.0, bi * bb));
}
else
{
			// no intersection - black pixel
data[(h - 1 - j) * w + i] = uchar4{ 0, 0, 0, 0 };
}
}
}
// where we look from:
// x, y, z coordinates of the camera at time t
vec3 CoordCameraFromTime(double r0c, double z0c, double p0c,
double arc, double azc,
double wrc, double wzc, double wpc,
double prc, double pzc, double t)
{
double r = r0c + arc * sin(wrc * t + prc);
double z = z0c + azc * sin(wzc * t + pzc);
double phi = p0c + wpc * t;
return vec3{ r * cos(phi), r * sin(phi), z };
};
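// The camera path is given in cylindrical coordinates: radius r and height z oscillate
// sinusoidally around (r0c, z0c) while the azimuth phi grows linearly with time, and the
// result is converted to Cartesian as (r*cos(phi), r*sin(phi), z). For example, with
// arc = azc = 0 the camera simply circles the z axis at radius r0c and height z0c.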
// where we look at:
// x, y, z coordinates of the view point at time t
vec3 CoordViewPointFromTime(double r0n, double z0n, double p0n,
double arn, double azn,
double wrn, double wzn, double wpn,
double prn, double pzn, double t)
{
double r = r0n + arn * sin(wrn * t + prn);
double z = z0n + azn * sin(wzn * t + pzn);
double phi = p0n + wpn * t;
return vec3{ r * cos(phi), r * sin(phi), z };
};
// MPI_Bcast is a broadcast: process 0 (the root) reads the value and sends it from its
// send buffer to every other process in the communicator's group
// MPI_COMM_WORLD identifies the set of processes taking part in the communication
void MpiReader(int rank, int& input)
{
if (!rank)
{
std::cin >> input;
}
MPI_Bcast(&input, 1, MPI_INT, 0, MPI_COMM_WORLD);
}
void MpiReader(int rank, double& input)
{
if (!rank)
{
std::cin >> input;
}
MPI_Bcast(&input, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
}
void MpiReader(int rank, std::string& input)
{
int sizeFilename;
if (!rank)
{
std::cin >> input;
sizeFilename = input.size();
}
MPI_Bcast(&sizeFilename, 1, MPI_INT, 0, MPI_COMM_WORLD);
input.resize(sizeFilename);
MPI_Bcast(const_cast<char*>(input.c_str()), sizeFilename, MPI_CHAR, 0, MPI_COMM_WORLD);
}
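// Usage sketch for the readers above: every rank calls MpiReader with the same variable,
// rank 0 fills it from stdin, and the broadcast leaves an identical copy on all ranks, e.g.
//   int n; MpiReader(procRank, n); // n now holds the same value on every rank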
int main(int argc, char* argv[])
{
int deviceSelection = 0;
if (argc >= 3)
{
printf("argc error\n");
return -1;
}
if (argc == 1)
{
deviceSelection = 1;
}
else if (strcmp(argv[1], "--default") == 0)
{
printf("400 \n");
printf("img_%% d.data \n");
printf("1240 960 100 \n");
printf("7.0 3.0 0.0 2.0 1.0 2.0 6.0 1.0 0.0 0.0 \n");
printf("2.0 0.0 0.0 0.5 0.1 1.0 4.0 1.0 0.0 0.0 \n");
printf("-2 -2 0 2 200 0 0 \n");
printf("-2 2 0 2 0 255 0 \n");
printf("2 0 0 2 0 0 255 \n");
printf("-4 -4 -1 -4 4 -1 4 -4 -1 4 4 -1 102 62 0 \n");
return 0;
}
else if (strcmp(argv[1], "--gpu") == 0)
{
deviceSelection = 1;
}
else if (strcmp(argv[1], "--cpu") == 0)
{
deviceSelection = 0;
}
int procRank, numberOfProcs;
MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &numberOfProcs); // number of processes in the communicator
	MPI_Comm_rank(MPI_COMM_WORLD, &procRank); // rank (index) of the current process
int deviceCnt;
	cudaGetDeviceCount(&deviceCnt); // number of available CUDA devices
	cudaSetDevice(procRank % deviceCnt); // pick the device this rank will use for GPU computation
int n, w, h;
double a = 100;
std::string path;
	// camera motion parameters
	// view-point motion parameters
double r0c, z0c, p0c, arc, azc, wrc, wzc, wpc, prc, pzc,
r0n, z0n, p0n, arn, azn, wrn, wzn, wpn, prn, pzn;
double r1 = 2, r2 = 2, r3 = 2;
vec3 o1 = { -2, -2, 0 };
vec3 o2 = { -2, 2, 0 };
vec3 o3 = { 2, 0, 0 };
int c1x, c1y, c1z;
uchar4 c1 = { 200, 0, 0 };
int c2x, c2y, c2z;
uchar4 c2 = { 0, 255, 0 };
int c3x, c3y, c3z;
uchar4 c3 = { 0, 0, 255 };
vec3 fv[4];
fv[0] = { -5, -5, -3 };
fv[1] = { -5, 5, -3 };
fv[2] = { 5, -5, -3 };
fv[3] = { 5, 5, -3 };
int fcx, fcy, fcz;
uchar4 fc = { 102, 62, 0 };
	// read the input (rank 0 reads from stdin, the values are then broadcast to all ranks)
	MpiReader(procRank, n); // number of frames
	MpiReader(procRank, path); // output image path pattern
	// frame resolution and field-of-view angle
MpiReader(procRank, w); MpiReader(procRank, h); MpiReader(procRank, a);
	// camera motion parameters, then view-point motion parameters
MpiReader(procRank, r0c); MpiReader(procRank, z0c); MpiReader(procRank, p0c); MpiReader(procRank, arc); MpiReader(procRank, azc); MpiReader(procRank, wrc); MpiReader(procRank, wzc); MpiReader(procRank, wpc); MpiReader(procRank, prc); MpiReader(procRank, pzc);
MpiReader(procRank, r0n); MpiReader(procRank, z0n); MpiReader(procRank, p0n); MpiReader(procRank, arn); MpiReader(procRank, azn); MpiReader(procRank, wrn); MpiReader(procRank, wzn); MpiReader(procRank, wpn); MpiReader(procRank, prn); MpiReader(procRank, pzn);
MpiReader(procRank, o1.x); MpiReader(procRank, o1.y); MpiReader(procRank, o1.z); MpiReader(procRank, r1); MpiReader(procRank, c1x); MpiReader(procRank, c1y); MpiReader(procRank, c1z);
MpiReader(procRank, o2.x); MpiReader(procRank, o2.y); MpiReader(procRank, o2.z); MpiReader(procRank, r2); MpiReader(procRank, c2x); MpiReader(procRank, c2y); MpiReader(procRank, c2z);
MpiReader(procRank, o3.x); MpiReader(procRank, o3.y); MpiReader(procRank, o3.z); MpiReader(procRank, r3); MpiReader(procRank, c3x); MpiReader(procRank, c3y); MpiReader(procRank, c3z);
MpiReader(procRank, fv[0].x); MpiReader(procRank, fv[0].y); MpiReader(procRank, fv[0].z); MpiReader(procRank, fv[1].x); MpiReader(procRank, fv[1].y); MpiReader(procRank, fv[1].z);
MpiReader(procRank, fv[2].x); MpiReader(procRank, fv[2].y); MpiReader(procRank, fv[2].z); MpiReader(procRank, fv[3].x); MpiReader(procRank, fv[3].y); MpiReader(procRank, fv[3].z);
MpiReader(procRank, fcx); MpiReader(procRank, fcy); MpiReader(procRank, fcz);
c1.x = c1x;
c1.y = c1y;
c1.z = c1z;
c2.x = c2x;
c2.y = c2y;
c2.z = c2z;
c3.x = c3x;
c3.y = c3y;
c3.z = c3z;
fc.x = fcx;
fc.y = fcy;
fc.z = fcz;
char buff[256];
	// buffer for the rendered frame
uchar4* data = (uchar4*)malloc(sizeof(uchar4) * w * h);
vec3 pc, pv;
	// build the triangulated scene
Triangle triangles[42];
BuildStage(triangles, r1, o1, c1, r2, o2, c2, r3, o3, c3, fv, fc);
	// time step: one full camera revolution (2 * pi) split into n frames
	double dt = 2 * M_PI / (double)n;
	// light source position
	vec3 lightPosition = { -2, 0, 4 };
	// light source color
uchar4 lightColor = { 255, 255, 255 };
//float sharedTime = 0;
double timeStart;
if (!procRank)
{
timeStart = MPI_Wtime();
}
if (deviceSelection == 0)
{
double cpuTime;
for (int k = procRank; k < n; k += numberOfProcs)
{
pc = CoordCameraFromTime(r0c, z0c, p0c, arc, azc, wrc, wzc, wpc, prc, pzc, k * dt);
pv = CoordViewPointFromTime(r0n, z0n, p0n, arn, azn, wrn, wzn, wpn, prn, pzn, k * dt);
auto start = steady_clock::now();
			// render the frame
Render(triangles, pc, pv, w, h, a, data, lightPosition, lightColor);
auto end = steady_clock::now();
cpuTime = ((double)duration_cast<microseconds>(end - start).count()) / 1000.0;
			// write the image to a file
sprintf(buff, path.c_str(), k);
printf("%d: %s %e ms\n", k, buff, cpuTime);
//sharedTime += cpuTime;
FILE* out = fopen(buff, "wb");
fwrite(&w, sizeof(int), 1, out);
fwrite(&h, sizeof(int), 1, out);
fwrite(data, sizeof(uchar4), w * h, out);
fclose(out);
}
//printf("All time: %e ms\n", sharedTime);
}
else
{
float deviceTime = 0;
cudaEvent_t start, stop;
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&stop));
Triangle* deviceTriangles;
		// copy the scene into GPU memory
CSC(cudaMalloc((void**)(&deviceTriangles), 42 * sizeof(Triangle)));
CSC(cudaMemcpy(deviceTriangles, triangles, 42 * sizeof(Triangle), cudaMemcpyHostToDevice));
uchar4* deviceData;
CSC(cudaMalloc((void**)(&deviceData), w * h * sizeof(uchar4)));
for (int k = procRank; k < n; k += numberOfProcs)
{
pc = CoordCameraFromTime(r0c, z0c, p0c, arc, azc, wrc, wzc, wpc, prc, pzc, k * dt);
pv = CoordViewPointFromTime(r0n, z0n, p0n, arn, azn, wrn, wzn, wpn, prn, pzn, k * dt);
CSC(cudaEventRecord(start));
DeviceRender<<<128, 128>>>(deviceTriangles, pc, pv, w, h, a, deviceData, lightPosition, lightColor);
CSC(cudaGetLastError());
CSC(cudaEventRecord(stop));
CSC(cudaEventSynchronize(stop));
CSC(cudaEventElapsedTime(&deviceTime, start, stop));
CSC(cudaMemcpy(data, deviceData, w * h * sizeof(uchar4), cudaMemcpyDeviceToHost));
sprintf(buff, path.c_str(), k);
printf("%d: %s %e ms\n", k, buff, deviceTime);
//sharedTime += deviceTime;
FILE* out = fopen(buff, "wb");
fwrite(&w, sizeof(int), 1, out);
fwrite(&h, sizeof(int), 1, out);
fwrite(data, sizeof(uchar4), w * h, out);
fclose(out);
}
//printf("All time: %e ms\n", sharedTime);
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(stop));
CSC(cudaFree(deviceTriangles));
CSC(cudaFree(deviceData));
}
free(data);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
double timeEnd;
if (!procRank)
{
timeEnd = MPI_Wtime();
std::cout << "TIME: ";
std::cout << (timeEnd - timeStart) * 1000.0 << "ms" << std::endl;
}
return 0;
} |
bb7ee6c46fe37c72b59c685959087b53f7382757.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| bb7ee6c46fe37c72b59c685959087b53f7382757.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
70d69172b088daaa903ad110f6e231c6a48d4b69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "resizeGPU.cuh"
//#define _DEBUG
#define BLOCK_DIM 64
#define threadNum 1024
#define WARP_SIZE 32
#define elemsPerThread 1
int32_t* deviceDataResized = NULL;
int32_t* deviceData = NULL;
int32_t* hostOriginalImage = NULL;
int32_t* hostResizedImage = NULL;
void reAllocPinned(int w, int h, int w2, int h2, int32_t* dataSource)
{
hipHostMalloc((void**)&hostOriginalImage, w*h* sizeof(int32_t)); // host pinned
hipHostMalloc((void**)&hostResizedImage, w2*h2 * sizeof(int32_t)); // host pinned
memcpy(hostOriginalImage, dataSource, w*h * sizeof(int32_t));
return;
}
void freePinned()
{
hipHostFree(hostOriginalImage);
hipHostFree(hostResizedImage);
return;
}
void initGPU(const int maxResolutionX, const int maxResolutionY)
{
hipMalloc((void**)&deviceDataResized, maxResolutionX*maxResolutionY * sizeof(int32_t));
hipMalloc((void**)&deviceData, maxResolutionX*maxResolutionY * sizeof(int32_t));
return;
}
void deinitGPU()
{
hipFree(deviceData);
hipFree(deviceDataResized);
return;
}
__global__ void SomeKernel(int32_t* originalImage, int32_t* resizedImage, int w, int h, int w2, int h2/*, float x_ratio, float y_ratio*/)
{
__shared__ int32_t tile[1024];
const float x_ratio = ((float)(w)) / w2;
const float y_ratio = ((float)(h)) / h2;
//const int blockbx = blockIdx.y * w2 + blockIdx.x*BLOCK_DIM;
//unsigned int threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x;
unsigned int threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x*elemsPerThread;
//__shared__ float result[threadNum*elemsPerThread];
unsigned int shift = 0;
//int32_t a, b, c, d, x, y, index;
while((threadId < w2*h2 && shift<elemsPerThread))
{
const int32_t i = threadId / w2;
const int32_t j = threadId - (i * w2);
//float x_diff, y_diff, blue, red, green;
const int32_t x = (int)((x_ratio * (j + 0.5f)) - 0.5f);
const int32_t y = (int)((y_ratio * (i + 0.5f)) - 0.5f);
const float x_diff = ((x_ratio * (j + 0.5f)) - 0.5f) - x;
const float y_diff = ((y_ratio * (i + 0.5f)) - 0.5f) - y;
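		// note: the (j + 0.5f) * ratio - 0.5f mapping samples at pixel centers, keeping the
		// source and destination grids aligned; x_diff / y_diff are the fractional offsets
		// used as the bilinear interpolation weights below.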
const int32_t index = (y*w + x);
const int32_t a = originalImage[index];
const int32_t b = originalImage[index + 1];
const int32_t c = originalImage[index + w];
const int32_t d = originalImage[index + w + 1];
// blue element
// Yb = Ab(1-w)(1-h) + Bb(w)(1-h) + Cb(h)(1-w) + Db(wh)
const float blue = (a & 0xff)*(1 - x_diff)*(1 - y_diff) + (b & 0xff)*(x_diff)*(1 - y_diff) +
(c & 0xff)*(y_diff)*(1 - x_diff) + (d & 0xff)*(x_diff*y_diff);
// green element
// Yg = Ag(1-w)(1-h) + Bg(w)(1-h) + Cg(h)(1-w) + Dg(wh)
const float green = ((a >> 8) & 0xff)*(1 - x_diff)*(1 - y_diff) + ((b >> 8) & 0xff)*(x_diff)*(1 - y_diff) +
((c >> 8) & 0xff)*(y_diff)*(1 - x_diff) + ((d >> 8) & 0xff)*(x_diff*y_diff);
// red element
// Yr = Ar(1-w)(1-h) + Br(w)(1-h) + Cr(h)(1-w) + Dr(wh)
const float red = ((a >> 16) & 0xff)*(1 - x_diff)*(1 - y_diff) + ((b >> 16) & 0xff)*(x_diff)*(1 - y_diff) +
((c >> 16) & 0xff)*(y_diff)*(1 - x_diff) + ((d >> 16) & 0xff)*(x_diff*y_diff);
/*
resizedImage[threadId] =
0xff000000 |
((((int32_t)red) << 16) & 0xff0000) |
((((int32_t)green) << 8) & 0xff00) |
((int32_t)blue);
*/
tile[threadIdx.x] =
0xff000000 |
((((int32_t)red) << 16) & 0xff0000) |
((((int32_t)green) << 8) & 0xff00) |
((int32_t)blue);
threadId++;
//threadId+= WARP_SIZE;
shift++;
}
__syncthreads();
	threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x*elemsPerThread;
	if (threadId < w2*h2) // guard the final write so the last, partially filled block stays in bounds
		resizedImage[threadId] = tile[threadIdx.x];
/*
shift--;
threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x*elemsPerThread+ shift;
while (shift >= 0)
{
resizedImage[threadId] = tile[shift];
shift--;
threadId--;
}
*/
}
int32_t* resizeBilinear_gpu(int w, int h, int w2, int h2)
{
#ifdef _DEBUG
hipError_t error; //store cuda error codes
#endif
int length = w2 * h2;
hipMemcpy(deviceData, hostOriginalImage, w*h * sizeof(int32_t), hipMemcpyHostToDevice);
//hipMemcpy2D(deviceData, w * sizeof(int32_t), hostOriginalImage, w * sizeof(int32_t), w * sizeof(int32_t), h, hipMemcpyHostToDevice);
//error = hipMemcpyToSymbol(deviceData, pixels, w*h * sizeof(int32_t),0, hipMemcpyHostToDevice);
#ifdef _DEBUG
if (error != hipSuccess)
{
printf("hipMemcpy (pixels->deviceData), returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
#endif
	dim3 threads = dim3(threadNum, 1, 1); // block of threadNum (1024) threads along x
	dim3 blocks = dim3((w2*h2 + threadNum*elemsPerThread - 1) / (threadNum*elemsPerThread), 1, 1); // round up so every output pixel gets a thread
//printf("Blockdim.x %d\n", blocks.x);
//printf("thrdim.x %d\n", threads.x);
SomeKernel << <blocks, threads >> >(deviceData, deviceDataResized, w, h, w2, h2/*, x_ratio, y_ratio*/);
hipDeviceSynchronize();
hipMemcpy(hostResizedImage, deviceDataResized, length * sizeof(int32_t), hipMemcpyDeviceToHost);
return hostResizedImage;
}
| 70d69172b088daaa903ad110f6e231c6a48d4b69.cu |
#include "resizeGPU.cuh"
//#define _DEBUG
#define BLOCK_DIM 64
#define threadNum 1024
#define WARP_SIZE 32
#define elemsPerThread 1
int32_t* deviceDataResized = NULL;
int32_t* deviceData = NULL;
int32_t* hostOriginalImage = NULL;
int32_t* hostResizedImage = NULL;
void reAllocPinned(int w, int h, int w2, int h2, int32_t* dataSource)
{
cudaMallocHost((void**)&hostOriginalImage, w*h* sizeof(int32_t)); // host pinned
cudaMallocHost((void**)&hostResizedImage, w2*h2 * sizeof(int32_t)); // host pinned
memcpy(hostOriginalImage, dataSource, w*h * sizeof(int32_t));
return;
}
void freePinned()
{
cudaFreeHost(hostOriginalImage);
cudaFreeHost(hostResizedImage);
return;
}
void initGPU(const int maxResolutionX, const int maxResolutionY)
{
cudaMalloc((void**)&deviceDataResized, maxResolutionX*maxResolutionY * sizeof(int32_t));
cudaMalloc((void**)&deviceData, maxResolutionX*maxResolutionY * sizeof(int32_t));
return;
}
void deinitGPU()
{
cudaFree(deviceData);
cudaFree(deviceDataResized);
return;
}
__global__ void SomeKernel(int32_t* originalImage, int32_t* resizedImage, int w, int h, int w2, int h2/*, float x_ratio, float y_ratio*/)
{
__shared__ int32_t tile[1024];
const float x_ratio = ((float)(w)) / w2;
const float y_ratio = ((float)(h)) / h2;
//const int blockbx = blockIdx.y * w2 + blockIdx.x*BLOCK_DIM;
//unsigned int threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x;
unsigned int threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x*elemsPerThread;
//__shared__ float result[threadNum*elemsPerThread];
unsigned int shift = 0;
//int32_t a, b, c, d, x, y, index;
while((threadId < w2*h2 && shift<elemsPerThread))
{
const int32_t i = threadId / w2;
const int32_t j = threadId - (i * w2);
//float x_diff, y_diff, blue, red, green;
const int32_t x = (int)((x_ratio * (j + 0.5f)) - 0.5f);
const int32_t y = (int)((y_ratio * (i + 0.5f)) - 0.5f);
const float x_diff = ((x_ratio * (j + 0.5f)) - 0.5f) - x;
const float y_diff = ((y_ratio * (i + 0.5f)) - 0.5f) - y;
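		// note: the (j + 0.5f) * ratio - 0.5f mapping samples at pixel centers, keeping the
		// source and destination grids aligned; x_diff / y_diff are the fractional offsets
		// used as the bilinear interpolation weights below.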
const int32_t index = (y*w + x);
const int32_t a = originalImage[index];
const int32_t b = originalImage[index + 1];
const int32_t c = originalImage[index + w];
const int32_t d = originalImage[index + w + 1];
// blue element
// Yb = Ab(1-w)(1-h) + Bb(w)(1-h) + Cb(h)(1-w) + Db(wh)
const float blue = (a & 0xff)*(1 - x_diff)*(1 - y_diff) + (b & 0xff)*(x_diff)*(1 - y_diff) +
(c & 0xff)*(y_diff)*(1 - x_diff) + (d & 0xff)*(x_diff*y_diff);
// green element
// Yg = Ag(1-w)(1-h) + Bg(w)(1-h) + Cg(h)(1-w) + Dg(wh)
const float green = ((a >> 8) & 0xff)*(1 - x_diff)*(1 - y_diff) + ((b >> 8) & 0xff)*(x_diff)*(1 - y_diff) +
((c >> 8) & 0xff)*(y_diff)*(1 - x_diff) + ((d >> 8) & 0xff)*(x_diff*y_diff);
// red element
// Yr = Ar(1-w)(1-h) + Br(w)(1-h) + Cr(h)(1-w) + Dr(wh)
const float red = ((a >> 16) & 0xff)*(1 - x_diff)*(1 - y_diff) + ((b >> 16) & 0xff)*(x_diff)*(1 - y_diff) +
((c >> 16) & 0xff)*(y_diff)*(1 - x_diff) + ((d >> 16) & 0xff)*(x_diff*y_diff);
/*
resizedImage[threadId] =
0xff000000 |
((((int32_t)red) << 16) & 0xff0000) |
((((int32_t)green) << 8) & 0xff00) |
((int32_t)blue);
*/
tile[threadIdx.x] =
0xff000000 |
((((int32_t)red) << 16) & 0xff0000) |
((((int32_t)green) << 8) & 0xff00) |
((int32_t)blue);
threadId++;
//threadId+= WARP_SIZE;
shift++;
}
__syncthreads();
	threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x*elemsPerThread;
	if (threadId < w2*h2) // guard the final write so the last, partially filled block stays in bounds
		resizedImage[threadId] = tile[threadIdx.x];
/*
shift--;
threadId = blockIdx.x * threadNum*elemsPerThread + threadIdx.x*elemsPerThread+ shift;
while (shift >= 0)
{
resizedImage[threadId] = tile[shift];
shift--;
threadId--;
}
*/
}
int32_t* resizeBilinear_gpu(int w, int h, int w2, int h2)
{
#ifdef _DEBUG
cudaError_t error; //store cuda error codes
#endif
int length = w2 * h2;
cudaMemcpy(deviceData, hostOriginalImage, w*h * sizeof(int32_t), cudaMemcpyHostToDevice);
//cudaMemcpy2D(deviceData, w * sizeof(int32_t), hostOriginalImage, w * sizeof(int32_t), w * sizeof(int32_t), h, cudaMemcpyHostToDevice);
//error = cudaMemcpyToSymbol(deviceData, pixels, w*h * sizeof(int32_t),0, cudaMemcpyHostToDevice);
#ifdef _DEBUG
if (error != cudaSuccess)
{
printf("cudaMemcpy (pixels->deviceData), returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
#endif
	dim3 threads = dim3(threadNum, 1, 1); // block of threadNum (1024) threads along x
	dim3 blocks = dim3((w2*h2 + threadNum*elemsPerThread - 1) / (threadNum*elemsPerThread), 1, 1); // round up so every output pixel gets a thread
//printf("Blockdim.x %d\n", blocks.x);
//printf("thrdim.x %d\n", threads.x);
SomeKernel << <blocks, threads >> >(deviceData, deviceDataResized, w, h, w2, h2/*, x_ratio, y_ratio*/);
cudaDeviceSynchronize();
cudaMemcpy(hostResizedImage, deviceDataResized, length * sizeof(int32_t), cudaMemcpyDeviceToHost);
return hostResizedImage;
}
|