hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
CudaReproject.hip | // !!! This is a file automatically generated by hipify!!!
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include "opencv2/core/cuda/common.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
texture<uchar4, 2> srcTexture;
texture<float, 2> xmapTexture, ymapTexture, weightTexture;
template<typename DstElemType>
__global__ void reprojectNearestNeighborKernel(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
int srcx = tex2D(xmapTexture, dstx, dsty);
int srcy = tex2D(ymapTexture, dstx, dsty);
//unsigned char* ptrDst = dstData + dsty * dstStep + dstx * 4;
DstElemType* ptrDst = (DstElemType*)(dstData + dsty * dstStep) + dstx * 4;
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
ptrDst[3] = ptrDst[2] = ptrDst[1] = ptrDst[0] = 0;
else
{
uchar4 val = tex2D(srcTexture, srcx, srcy);
ptrDst[0] = val.x;
ptrDst[1] = val.y;
ptrDst[2] = val.z;
ptrDst[3] = val.w;
}
}
const int BILINEAR_INTER_SHIFT = 10;
const int BILINEAR_INTER_BACK_SHIFT = BILINEAR_INTER_SHIFT * 2;
const int BILINEAR_UNIT = 1 << BILINEAR_INTER_SHIFT;
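// Fixed-point bilinear weighting: fractional offsets are scaled by 2^BILINEAR_INTER_SHIFT (1024),
// so each product of an x-weight and a y-weight carries a factor of 2^20 that is removed with
// BILINEAR_INTER_BACK_SHIFT in reprojectLinearKernel below.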
template<typename DstElemType>
__global__ void reprojectLinearKernel(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
float srcx = tex2D(xmapTexture, dstx, dsty);
float srcy = tex2D(ymapTexture, dstx, dsty);
//unsigned char* ptrDst = dstData + dsty * dstStep + dstx * 4;
DstElemType* ptrDst = (DstElemType*)(dstData + dsty * dstStep) + dstx * 4;
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
ptrDst[3] = ptrDst[2] = ptrDst[1] = ptrDst[0] = 0;
else
{
int x0 = srcx, y0 = srcy;
int x1 = x0 + (x0 < srcWidth - 1), y1 = y0 + (y0 < srcHeight - 1);
int deltax0 = (srcx - x0) * BILINEAR_UNIT, deltax1 = BILINEAR_UNIT - deltax0;
int deltay0 = (srcy - y0) * BILINEAR_UNIT, deltay1 = BILINEAR_UNIT - deltay0;
int b = 0, g = 0, r = 0, w = 0;
uchar4 val;
val = tex2D(srcTexture, x0, y0);
w = deltax1 * deltay1;
b += val.x * w;
g += val.y * w;
r += val.z * w;
val = tex2D(srcTexture, x1, y0);
w = deltax0 * deltay1;
b += val.x * w;
g += val.y * w;
r += val.z * w;
val = tex2D(srcTexture, x0, y1);
w = deltax1 * deltay0;
b += val.x * w;
g += val.y * w;
r += val.z * w;
val = tex2D(srcTexture, x1, y1);
w = deltax0 * deltay0;
b += val.x * w;
g += val.y * w;
r += val.z * w;
ptrDst[0] = b >> BILINEAR_INTER_BACK_SHIFT;
ptrDst[1] = g >> BILINEAR_INTER_BACK_SHIFT;
ptrDst[2] = r >> BILINEAR_INTER_BACK_SHIFT;
ptrDst[3] = 0;
}
}
__device__ __forceinline__ unsigned char bicubic(const unsigned char rgb[4], const float w[4])
{
int res = (int)(rgb[0] * w[0] + rgb[1] * w[1] + rgb[2] * w[2] + rgb[3] * w[3] + 0.5);
res = res > 255 ? 255 : res;
res = res < 0 ? 0 : res;
return (unsigned char)res;
}
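// calcWeight computes the four cubic interpolation weights for a fractional offset deta in [0, 1);
// weight[i] applies to the neighborhood sample at offset i - 1 from the floor position, the weights
// sum to 1, and all weight falls on sample 1 when deta == 0.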
__device__ __forceinline__ void calcWeight(float deta, float weight[4])
{
weight[3] = (deta * deta * (-1.0F + deta));
weight[2] = (deta * (1.0F + deta * (1.0F - deta)));
weight[1] = (1.0F + deta * deta * (-2.0F + deta)) ;
weight[0] = (-deta * (1.0F + deta * (-2.0F + deta))) ;
}
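// resampling performs bicubic interpolation of the source texture at (x, y): it gathers a 4x4
// neighborhood clamped to the image border, filters each row of each color channel with the
// horizontal weights, then filters the four row results with the vertical weights. Only the first
// three channels are interpolated; the fourth is set to 0.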
template<typename DstElemType>
__device__ __forceinline__ void resampling(int width, int height,
float x, float y, DstElemType result[4])
{
int x2 = (int)x;
int y2 = (int)y;
int nx[4];
int ny[4];
for (int i = 0; i < 4;++i)
{
nx[i] = (x2 - 1 + i);
ny[i] = (y2 - 1 + i);
if (nx[i] < 0) nx[i] = 0;
if (nx[i] > width - 1) nx[i] = width - 1;
if (ny[i] < 0) ny[i] = 0;
if (ny[i] > height - 1) ny[i] = height - 1;
}
float u = (x - nx[1]);
float v = (y - ny[1]);
// u, v are the fractional offsets of (x, y) within the 4x4 neighborhood, measured from nx[1], ny[1]
float tweight1[4], tweight2[4];
calcWeight(u, tweight1); // horizontal weights
calcWeight(v, tweight2); // vertical weights
uchar4 val[4][4];
for (int j = 0; j < 4; j++)
{
for (int i = 0; i < 4; i++)
val[j][i] = tex2D(srcTexture, nx[i], ny[j]);
}
unsigned char* ptrVal = &val[0][0].x;
unsigned char temp0[4], temp1[4];
for (int k = 0; k < 3; ++k)
{
for (int j = 0; j < 4; j++)
{
// for row j, gather channel k from each of the four samples
for (int i = 0; i < 4; i++)
{
temp0[i] = ptrVal[j * 16 + i * 4 + k];
}
// three color channels of the 4x4 region: horizontal filter for this row
temp1[j] = bicubic(temp0, tweight1);
}
result[k] = bicubic(temp1, tweight2);
}
result[3] = 0;
}
template<typename DstElemType>
__global__ void reprojectCubicKernel(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
float srcx = tex2D(xmapTexture, dstx, dsty);
float srcy = tex2D(ymapTexture, dstx, dsty);
//unsigned char* ptrDst = dstData + dsty * dstStep + dstx * 4;
DstElemType* ptrDst = (DstElemType*)(dstData + dsty * dstStep) + dstx * 4;
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
ptrDst[3] = ptrDst[2] = ptrDst[1] = ptrDst[0] = 0;
else
resampling(srcWidth, srcHeight, srcx, srcy, ptrDst);
}
__global__ void reprojectWeightedAccumulate(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
float srcx = tex2D(xmapTexture, dstx, dsty);
float srcy = tex2D(ymapTexture, dstx, dsty);
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
;
else
{
float temp[4];
float w = tex2D(weightTexture, dstx, dsty);
resampling(srcWidth, srcHeight, srcx, srcy, temp);
float* ptrDst = (float*)(dstData + dsty * dstStep) + dstx * 4;
ptrDst[0] += temp[0] * w;
ptrDst[1] += temp[1] * w;
ptrDst[2] += temp[2] * w;
ptrDst[3] = 0;
}
}
void cudaReproject(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst,
const cv::cuda::GpuMat& xmap, const cv::cuda::GpuMat& ymap, cv::cuda::Stream& stream)
{
CV_Assert(src.data && src.type() == CV_8UC4 &&
xmap.data && xmap.type() == CV_32FC1 && ymap.data && ymap.type() == CV_32FC1 &&
xmap.size() == ymap.size());
cv::Size dstSize = xmap.size();
dst.create(dstSize, CV_8UC4);
hipChannelFormatDesc chanDescUchar4 = hipCreateChannelDesc<uchar4>();
hipChannelFormatDesc chanDescFloat = hipCreateChannelDesc<float>();
cudaSafeCall(hipBindTexture2D(NULL, srcTexture, src.data, chanDescUchar4, src.cols, src.rows, src.step));
cudaSafeCall(hipBindTexture2D(NULL, xmapTexture, xmap.data, chanDescFloat, xmap.cols, xmap.rows, xmap.step));
cudaSafeCall(hipBindTexture2D(NULL, ymapTexture, ymap.data, chanDescFloat, ymap.cols, ymap.rows, ymap.step));
hipStream_t st = cv::cuda::StreamAccessor::getStream(stream);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
hipLaunchKernelGGL(( reprojectCubicKernel<unsigned char>), dim3(grid), dim3(block), 0, st, dst.data, dstSize.width, dstSize.height, dst.step, src.cols, src.rows);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipUnbindTexture(srcTexture));
cudaSafeCall(hipUnbindTexture(xmapTexture));
cudaSafeCall(hipUnbindTexture(ymapTexture));
//cudaSafeCall(hipDeviceSynchronize());
}
void cudaReprojectTo16S(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst,
const cv::cuda::GpuMat& xmap, const cv::cuda::GpuMat& ymap, cv::cuda::Stream& stream)
{
CV_Assert(src.data && src.type() == CV_8UC4 &&
xmap.data && xmap.type() == CV_32FC1 && ymap.data && ymap.type() == CV_32FC1 &&
xmap.size() == ymap.size());
cv::Size dstSize = xmap.size();
dst.create(dstSize, CV_16SC4);
hipChannelFormatDesc chanDescUchar4 = hipCreateChannelDesc<uchar4>();
hipChannelFormatDesc chanDescFloat = hipCreateChannelDesc<float>();
cudaSafeCall(hipBindTexture2D(NULL, srcTexture, src.data, chanDescUchar4, src.cols, src.rows, src.step));
cudaSafeCall(hipBindTexture2D(NULL, xmapTexture, xmap.data, chanDescFloat, xmap.cols, xmap.rows, xmap.step));
cudaSafeCall(hipBindTexture2D(NULL, ymapTexture, ymap.data, chanDescFloat, ymap.cols, ymap.rows, ymap.step));
hipStream_t st = cv::cuda::StreamAccessor::getStream(stream);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
hipLaunchKernelGGL(( reprojectCubicKernel<short>), dim3(grid), dim3(block), 0, st, dst.data, dstSize.width, dstSize.height, dst.step, src.cols, src.rows);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipUnbindTexture(srcTexture));
cudaSafeCall(hipUnbindTexture(xmapTexture));
cudaSafeCall(hipUnbindTexture(ymapTexture));
//cudaSafeCall(hipDeviceSynchronize());
}
void cudaReprojectWeightedAccumulateTo32F(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst,
const cv::cuda::GpuMat& xmap, const cv::cuda::GpuMat& ymap, const cv::cuda::GpuMat& weight,
cv::cuda::Stream& stream)
{
CV_Assert(src.data && src.type() == CV_8UC4 &&
xmap.data && xmap.type() == CV_32FC1 && ymap.data && ymap.type() == CV_32FC1 &&
weight.data && weight.type() == CV_32FC1 &&
xmap.size() == ymap.size() && xmap.size() == weight.size());
cv::Size dstSize = xmap.size();
dst.create(dstSize, CV_32FC4);
hipChannelFormatDesc chanDescUchar4 = hipCreateChannelDesc<uchar4>();
hipChannelFormatDesc chanDescFloat = hipCreateChannelDesc<float>();
cudaSafeCall(hipBindTexture2D(NULL, srcTexture, src.data, chanDescUchar4, src.cols, src.rows, src.step));
cudaSafeCall(hipBindTexture2D(NULL, xmapTexture, xmap.data, chanDescFloat, xmap.cols, xmap.rows, xmap.step));
cudaSafeCall(hipBindTexture2D(NULL, ymapTexture, ymap.data, chanDescFloat, ymap.cols, ymap.rows, ymap.step));
cudaSafeCall(hipBindTexture2D(NULL, weightTexture, weight.data, chanDescFloat, weight.cols, weight.rows, weight.step));
hipStream_t st = cv::cuda::StreamAccessor::getStream(stream);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
hipLaunchKernelGGL(( reprojectWeightedAccumulate), dim3(grid), dim3(block), 0, st, dst.data, dstSize.width, dstSize.height, dst.step, src.cols, src.rows);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipUnbindTexture(srcTexture));
cudaSafeCall(hipUnbindTexture(xmapTexture));
cudaSafeCall(hipUnbindTexture(ymapTexture));
cudaSafeCall(hipUnbindTexture(weightTexture));
}
| CudaReproject.cu | #include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include "opencv2/core/cuda/common.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
texture<uchar4, 2> srcTexture;
texture<float, 2> xmapTexture, ymapTexture, weightTexture;
template<typename DstElemType>
__global__ void reprojectNearestNeighborKernel(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
int srcx = tex2D(xmapTexture, dstx, dsty);
int srcy = tex2D(ymapTexture, dstx, dsty);
//unsigned char* ptrDst = dstData + dsty * dstStep + dstx * 4;
DstElemType* ptrDst = (DstElemType*)(dstData + dsty * dstStep) + dstx * 4;
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
ptrDst[3] = ptrDst[2] = ptrDst[1] = ptrDst[0] = 0;
else
{
uchar4 val = tex2D(srcTexture, srcx, srcy);
ptrDst[0] = val.x;
ptrDst[1] = val.y;
ptrDst[2] = val.z;
ptrDst[3] = val.w;
}
}
const int BILINEAR_INTER_SHIFT = 10;
const int BILINEAR_INTER_BACK_SHIFT = BILINEAR_INTER_SHIFT * 2;
const int BILINEAR_UNIT = 1 << BILINEAR_INTER_SHIFT;
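// Fixed-point bilinear weighting: fractional offsets are scaled by 2^BILINEAR_INTER_SHIFT (1024),
// so each product of an x-weight and a y-weight carries a factor of 2^20 that is removed with
// BILINEAR_INTER_BACK_SHIFT in reprojectLinearKernel below.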
template<typename DstElemType>
__global__ void reprojectLinearKernel(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
float srcx = tex2D(xmapTexture, dstx, dsty);
float srcy = tex2D(ymapTexture, dstx, dsty);
//unsigned char* ptrDst = dstData + dsty * dstStep + dstx * 4;
DstElemType* ptrDst = (DstElemType*)(dstData + dsty * dstStep) + dstx * 4;
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
ptrDst[3] = ptrDst[2] = ptrDst[1] = ptrDst[0] = 0;
else
{
int x0 = srcx, y0 = srcy;
int x1 = x0 + (x0 < srcWidth - 1), y1 = y0 + (y0 < srcHeight - 1);
int deltax0 = (srcx - x0) * BILINEAR_UNIT, deltax1 = BILINEAR_UNIT - deltax0;
int deltay0 = (srcy - y0) * BILINEAR_UNIT, deltay1 = BILINEAR_UNIT - deltay0;
int b = 0, g = 0, r = 0, w = 0;
uchar4 val;
val = tex2D(srcTexture, x0, y0);
w = deltax1 * deltay1;
b += val.x * w;
g += val.y * w;
r += val.z * w;
val = tex2D(srcTexture, x1, y0);
w = deltax0 * deltay1;
b += val.x * w;
g += val.y * w;
r += val.z * w;
val = tex2D(srcTexture, x0, y1);
w = deltax1 * deltay0;
b += val.x * w;
g += val.y * w;
r += val.z * w;
val = tex2D(srcTexture, x1, y1);
w = deltax0 * deltay0;
b += val.x * w;
g += val.y * w;
r += val.z * w;
ptrDst[0] = b >> BILINEAR_INTER_BACK_SHIFT;
ptrDst[1] = g >> BILINEAR_INTER_BACK_SHIFT;
ptrDst[2] = r >> BILINEAR_INTER_BACK_SHIFT;
ptrDst[3] = 0;
}
}
__device__ __forceinline__ unsigned char bicubic(const unsigned char rgb[4], const float w[4])
{
int res = (int)(rgb[0] * w[0] + rgb[1] * w[1] + rgb[2] * w[2] + rgb[3] * w[3] + 0.5);
res = res > 255 ? 255 : res;
res = res < 0 ? 0 : res;
return (unsigned char)res;
}
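// calcWeight computes the four cubic interpolation weights for a fractional offset deta in [0, 1);
// weight[i] applies to the neighborhood sample at offset i - 1 from the floor position, the weights
// sum to 1, and all weight falls on sample 1 when deta == 0.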
__device__ __forceinline__ void calcWeight(float deta, float weight[4])
{
weight[3] = (deta * deta * (-1.0F + deta));
weight[2] = (deta * (1.0F + deta * (1.0F - deta)));
weight[1] = (1.0F + deta * deta * (-2.0F + deta)) ;
weight[0] = (-deta * (1.0F + deta * (-2.0F + deta))) ;
}
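// resampling performs bicubic interpolation of the source texture at (x, y): it gathers a 4x4
// neighborhood clamped to the image border, filters each row of each color channel with the
// horizontal weights, then filters the four row results with the vertical weights. Only the first
// three channels are interpolated; the fourth is set to 0.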
template<typename DstElemType>
__device__ __forceinline__ void resampling(int width, int height,
float x, float y, DstElemType result[4])
{
int x2 = (int)x;
int y2 = (int)y;
int nx[4];
int ny[4];
for (int i = 0; i < 4;++i)
{
nx[i] = (x2 - 1 + i);
ny[i] = (y2 - 1 + i);
if (nx[i] < 0) nx[i] = 0;
if (nx[i] > width - 1) nx[i] = width - 1;
if (ny[i] < 0) ny[i] = 0;
if (ny[i] > height - 1) ny[i] = height - 1;
}
float u = (x - nx[1]);
float v = (y - ny[1]);
// u, v are the fractional offsets of (x, y) within the 4x4 neighborhood, measured from nx[1], ny[1]
float tweight1[4], tweight2[4];
calcWeight(u, tweight1); // horizontal weights
calcWeight(v, tweight2); // vertical weights
uchar4 val[4][4];
for (int j = 0; j < 4; j++)
{
for (int i = 0; i < 4; i++)
val[j][i] = tex2D(srcTexture, nx[i], ny[j]);
}
unsigned char* ptrVal = &val[0][0].x;
unsigned char temp0[4], temp1[4];
for (int k = 0; k < 3; ++k)
{
for (int j = 0; j < 4; j++)
{
// for row j, gather channel k from each of the four samples
for (int i = 0; i < 4; i++)
{
temp0[i] = ptrVal[j * 16 + i * 4 + k];
}
// three color channels of the 4x4 region: horizontal filter for this row
temp1[j] = bicubic(temp0, tweight1);
}
result[k] = bicubic(temp1, tweight2);
}
result[3] = 0;
}
template<typename DstElemType>
__global__ void reprojectCubicKernel(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
float srcx = tex2D(xmapTexture, dstx, dsty);
float srcy = tex2D(ymapTexture, dstx, dsty);
//unsigned char* ptrDst = dstData + dsty * dstStep + dstx * 4;
DstElemType* ptrDst = (DstElemType*)(dstData + dsty * dstStep) + dstx * 4;
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
ptrDst[3] = ptrDst[2] = ptrDst[1] = ptrDst[0] = 0;
else
resampling(srcWidth, srcHeight, srcx, srcy, ptrDst);
}
__global__ void reprojectWeightedAccumulate(unsigned char* dstData,
int dstWidth, int dstHeight, int dstStep, int srcWidth, int srcHeight)
{
int dstx = threadIdx.x + blockIdx.x * blockDim.x;
int dsty = threadIdx.y + blockIdx.y * blockDim.y;
float srcx = tex2D(xmapTexture, dstx, dsty);
float srcy = tex2D(ymapTexture, dstx, dsty);
if (srcx < 0 || srcx >= srcWidth || srcy < 0 || srcy >= srcHeight)
;
else
{
float temp[4];
float w = tex2D(weightTexture, dstx, dsty);
resampling(srcWidth, srcHeight, srcx, srcy, temp);
float* ptrDst = (float*)(dstData + dsty * dstStep) + dstx * 4;
ptrDst[0] += temp[0] * w;
ptrDst[1] += temp[1] * w;
ptrDst[2] += temp[2] * w;
ptrDst[3] = 0;
}
}
void cudaReproject(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst,
const cv::cuda::GpuMat& xmap, const cv::cuda::GpuMat& ymap, cv::cuda::Stream& stream)
{
CV_Assert(src.data && src.type() == CV_8UC4 &&
xmap.data && xmap.type() == CV_32FC1 && ymap.data && ymap.type() == CV_32FC1 &&
xmap.size() == ymap.size());
cv::Size dstSize = xmap.size();
dst.create(dstSize, CV_8UC4);
cudaChannelFormatDesc chanDescUchar4 = cudaCreateChannelDesc<uchar4>();
cudaChannelFormatDesc chanDescFloat = cudaCreateChannelDesc<float>();
cudaSafeCall(cudaBindTexture2D(NULL, srcTexture, src.data, chanDescUchar4, src.cols, src.rows, src.step));
cudaSafeCall(cudaBindTexture2D(NULL, xmapTexture, xmap.data, chanDescFloat, xmap.cols, xmap.rows, xmap.step));
cudaSafeCall(cudaBindTexture2D(NULL, ymapTexture, ymap.data, chanDescFloat, ymap.cols, ymap.rows, ymap.step));
cudaStream_t st = cv::cuda::StreamAccessor::getStream(stream);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
reprojectCubicKernel<unsigned char><<<grid, block, 0, st>>>(dst.data, dstSize.width, dstSize.height, dst.step, src.cols, src.rows);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaUnbindTexture(srcTexture));
cudaSafeCall(cudaUnbindTexture(xmapTexture));
cudaSafeCall(cudaUnbindTexture(ymapTexture));
//cudaSafeCall(cudaDeviceSynchronize());
}
void cudaReprojectTo16S(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst,
const cv::cuda::GpuMat& xmap, const cv::cuda::GpuMat& ymap, cv::cuda::Stream& stream)
{
CV_Assert(src.data && src.type() == CV_8UC4 &&
xmap.data && xmap.type() == CV_32FC1 && ymap.data && ymap.type() == CV_32FC1 &&
xmap.size() == ymap.size());
cv::Size dstSize = xmap.size();
dst.create(dstSize, CV_16SC4);
cudaChannelFormatDesc chanDescUchar4 = cudaCreateChannelDesc<uchar4>();
cudaChannelFormatDesc chanDescFloat = cudaCreateChannelDesc<float>();
cudaSafeCall(cudaBindTexture2D(NULL, srcTexture, src.data, chanDescUchar4, src.cols, src.rows, src.step));
cudaSafeCall(cudaBindTexture2D(NULL, xmapTexture, xmap.data, chanDescFloat, xmap.cols, xmap.rows, xmap.step));
cudaSafeCall(cudaBindTexture2D(NULL, ymapTexture, ymap.data, chanDescFloat, ymap.cols, ymap.rows, ymap.step));
cudaStream_t st = cv::cuda::StreamAccessor::getStream(stream);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
reprojectCubicKernel<short><<<grid, block, 0, st>>>(dst.data, dstSize.width, dstSize.height, dst.step, src.cols, src.rows);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaUnbindTexture(srcTexture));
cudaSafeCall(cudaUnbindTexture(xmapTexture));
cudaSafeCall(cudaUnbindTexture(ymapTexture));
//cudaSafeCall(cudaDeviceSynchronize());
}
void cudaReprojectWeightedAccumulateTo32F(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst,
const cv::cuda::GpuMat& xmap, const cv::cuda::GpuMat& ymap, const cv::cuda::GpuMat& weight,
cv::cuda::Stream& stream)
{
CV_Assert(src.data && src.type() == CV_8UC4 &&
xmap.data && xmap.type() == CV_32FC1 && ymap.data && ymap.type() == CV_32FC1 &&
weight.data && weight.type() == CV_32FC1 &&
xmap.size() == ymap.size() && xmap.size() == weight.size());
cv::Size dstSize = xmap.size();
dst.create(dstSize, CV_32FC4);
cudaChannelFormatDesc chanDescUchar4 = cudaCreateChannelDesc<uchar4>();
cudaChannelFormatDesc chanDescFloat = cudaCreateChannelDesc<float>();
cudaSafeCall(cudaBindTexture2D(NULL, srcTexture, src.data, chanDescUchar4, src.cols, src.rows, src.step));
cudaSafeCall(cudaBindTexture2D(NULL, xmapTexture, xmap.data, chanDescFloat, xmap.cols, xmap.rows, xmap.step));
cudaSafeCall(cudaBindTexture2D(NULL, ymapTexture, ymap.data, chanDescFloat, ymap.cols, ymap.rows, ymap.step));
cudaSafeCall(cudaBindTexture2D(NULL, weightTexture, weight.data, chanDescFloat, weight.cols, weight.rows, weight.step));
cudaStream_t st = cv::cuda::StreamAccessor::getStream(stream);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
reprojectWeightedAccumulate<<<grid, block, 0, st>>>(dst.data, dstSize.width, dstSize.height, dst.step, src.cols, src.rows);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaUnbindTexture(srcTexture));
cudaSafeCall(cudaUnbindTexture(xmapTexture));
cudaSafeCall(cudaUnbindTexture(ymapTexture));
cudaSafeCall(cudaUnbindTexture(weightTexture));
}
|
7344d47c4bc686ec69273ced09104621b94c400f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <inttypes.h>
#include <math.h>
#include "utils.hpp"
#include "update.hpp"
#include "cuStinger.hpp"
#include "algs.cuh"
#include "static_breadth_first_search/bfs_top_down.cuh"
// #include "static_breadth_first_search/bfs_bottom_up.cuh"
// #include "static_breadth_first_search/bfs_hybrid.cuh"
#include "static_connected_components/cc.cuh"
#include "static_page_rank/pr.cuh"
#include "static_betweenness_centrality/bc.cuh"
using namespace cuStingerAlgs;
#define CUDA(call, ...) do { \
hipError_t _e = (call); \
if (_e == hipSuccess) break; \
fprintf(stdout, \
"CUDA runtime error: %s (%d)\n", \
hipGetErrorString(_e), _e); \
return -1; \
} while (0)
int main(const int argc, char *argv[]){
int device=0;
hipSetDevice(device);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
length_t nv, ne,*off;
vertexId_t *adj;
bool isDimacs,isSNAP,isRmat=false,isMarket;
string filename(argv[1]);
isDimacs = filename.find(".graph")==std::string::npos?false:true;
isSNAP = filename.find(".txt")==std::string::npos?false:true;
isRmat = filename.find("kron")==std::string::npos?false:true;
isMarket = filename.find(".mtx")==std::string::npos?false:true;
if(isDimacs){
readGraphDIMACS(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isSNAP){
readGraphSNAP(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isMarket){
readGraphMatrixMarket(argv[1],&off,&adj,&nv,&ne,(isRmat)?false:true);
}
else{
cout << "Unknown graph type" << endl;
}
cout << "Vertices: " << nv << " Edges: " << ne << endl;
hipEvent_t ce_start,ce_stop;
cuStinger custing(defaultInitAllocater,defaultUpdateAllocater);
cuStingerInitConfig hipInit;
hipInit.initState =eInitStateCSR;
hipInit.maxNV = nv+1;
hipInit.useVWeight = false;
hipInit.isSemantic = false; // Use edge types and vertex types
hipInit.useEWeight = false;
// CSR data
hipInit.csrNV = nv;
hipInit.csrNE = ne;
hipInit.csrOff = off;
hipInit.csrAdj = adj;
hipInit.csrVW = NULL;
hipInit.csrEW = NULL;
custing.initializeCuStinger(hipInit);
float totalTime;
ccBaseline scc;
scc.Init(custing);
scc.Reset();
start_clock(ce_start, ce_stop);
// scc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc.Release();
ccConcurrent scc2;
scc2.Init(custing);
scc2.Reset();
start_clock(ce_start, ce_stop);
// scc2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc2.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc2.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc2.Release();
ccConcurrentLB scc3;
scc3.Init(custing);
scc3.Reset();
start_clock(ce_start, ce_stop);
scc3.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << scc3.GetIterationCount() << endl;
cout << "The number of connected-compoents : " << scc3.CountConnectComponents(custing) << endl;
cout << "Total time for connected-compoents : " << totalTime << endl;
scc3.Release();
// ccConcurrentOptimized scc4;
// scc4.Init(custing);
// scc4.Reset();
// start_clock(ce_start, ce_stop);
// scc4.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc4.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc4.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
// scc4.Release();
// Find the vertex with the largest degree (used below as the BFS source)
vertexId_t maxV=0;
length_t maxLen=0;
for(int v=1; v<nv;v++){
if((off[v+1]-off[v])>maxLen){
maxV=v;
maxLen=off[v+1]-off[v];
}
}
// cout << "Largest vertex is: " << maxV << " With the length of :" << maxLen << endl;
bfsTD bfs;
bfs.Init(custing);
bfs.Reset();
bfs.setInputParameters(maxV);
start_clock(ce_start, ce_stop);
bfs.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of levels : " << bfs.getLevels() << endl;
cout << "The number of elements found : " << bfs.getElementsFound() << endl;
cout << "Total time for BFS - Top-Down : " << totalTime << endl;
bfs.Release();
// bfsBU bfsbu;
// bfsbu.Init(custing);
// bfsbu.Reset();
// bfsbu.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsbu.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsbu.getLevels() << endl;
// cout << "The number of elements found : " << bfsbu.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Bottom-up: " << totalTime << endl;
// bfsbu.Release();
// bfsHybrid bfsHy;
// bfsHy.Init(custing);
// bfsHy.Reset();
// bfsHy.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsHy.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsHy.getLevels() << endl;
// cout << "The number of elements found : " << bfsHy.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Hybrid : " << totalTime << endl;
// bfsHy.Release();
StaticPageRank pr;
pr.Init(custing);
pr.Reset();
pr.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << pr.getIterationCount() << endl;
cout << "Total time for pagerank : " << totalTime << endl;
cout << "Average time per iteartion : " << totalTime/(float)pr.getIterationCount() << endl;
// pr.printRankings(custing);
pr.Release();
StaticPageRank pr2;// =new StaticPageRank();
pr2.Init(custing);
pr2.Reset();
pr2.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << pr2.getIterationCount() << endl;
// cout << "Total time for pagerank : " << totalTime << endl;
// cout << "Average time per iteartion : " << totalTime/(float)pr2.getIterationCount() << endl;
// pr2.printRankings(custing);
pr2.Release();
float *bc = new float[nv];
for (int k = 0; k < nv; k++)
{
bc[k] = 0;
}
StaticBC sbc(bc);
sbc.Init(custing);
sbc.Reset();
start_clock(ce_start, ce_stop);
sbc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "Total time for Static Betweenness Centrality: " << totalTime << endl;
sbc.Reset();
sbc.Release();
delete[] bc;
custing.freecuStinger();
free(off);
free(adj);
return 0;
}
| 7344d47c4bc686ec69273ced09104621b94c400f.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <inttypes.h>
#include <math.h>
#include "utils.hpp"
#include "update.hpp"
#include "cuStinger.hpp"
#include "algs.cuh"
#include "static_breadth_first_search/bfs_top_down.cuh"
// #include "static_breadth_first_search/bfs_bottom_up.cuh"
// #include "static_breadth_first_search/bfs_hybrid.cuh"
#include "static_connected_components/cc.cuh"
#include "static_page_rank/pr.cuh"
#include "static_betweenness_centrality/bc.cuh"
using namespace cuStingerAlgs;
#define CUDA(call, ...) do { \
cudaError_t _e = (call); \
if (_e == cudaSuccess) break; \
fprintf(stdout, \
"CUDA runtime error: %s (%d)\n", \
cudaGetErrorString(_e), _e); \
return -1; \
} while (0)
int main(const int argc, char *argv[]){
int device=0;
cudaSetDevice(device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
length_t nv, ne,*off;
vertexId_t *adj;
bool isDimacs,isSNAP,isRmat=false,isMarket;
string filename(argv[1]);
isDimacs = filename.find(".graph")==std::string::npos?false:true;
isSNAP = filename.find(".txt")==std::string::npos?false:true;
isRmat = filename.find("kron")==std::string::npos?false:true;
isMarket = filename.find(".mtx")==std::string::npos?false:true;
if(isDimacs){
readGraphDIMACS(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isSNAP){
readGraphSNAP(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isMarket){
readGraphMatrixMarket(argv[1],&off,&adj,&nv,&ne,(isRmat)?false:true);
}
else{
cout << "Unknown graph type" << endl;
}
cout << "Vertices: " << nv << " Edges: " << ne << endl;
cudaEvent_t ce_start,ce_stop;
cuStinger custing(defaultInitAllocater,defaultUpdateAllocater);
cuStingerInitConfig cuInit;
cuInit.initState =eInitStateCSR;
cuInit.maxNV = nv+1;
cuInit.useVWeight = false;
cuInit.isSemantic = false; // Use edge types and vertex types
cuInit.useEWeight = false;
// CSR data
cuInit.csrNV = nv;
cuInit.csrNE = ne;
cuInit.csrOff = off;
cuInit.csrAdj = adj;
cuInit.csrVW = NULL;
cuInit.csrEW = NULL;
custing.initializeCuStinger(cuInit);
float totalTime;
ccBaseline scc;
scc.Init(custing);
scc.Reset();
start_clock(ce_start, ce_stop);
// scc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc.Release();
ccConcurrent scc2;
scc2.Init(custing);
scc2.Reset();
start_clock(ce_start, ce_stop);
// scc2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc2.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc2.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc2.Release();
ccConcurrentLB scc3;
scc3.Init(custing);
scc3.Reset();
start_clock(ce_start, ce_stop);
scc3.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << scc3.GetIterationCount() << endl;
cout << "The number of connected-compoents : " << scc3.CountConnectComponents(custing) << endl;
cout << "Total time for connected-compoents : " << totalTime << endl;
scc3.Release();
// ccConcurrentOptimized scc4;
// scc4.Init(custing);
// scc4.Reset();
// start_clock(ce_start, ce_stop);
// scc4.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc4.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc4.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
// scc4.Release();
// Find the vertex with the largest degree (used below as the BFS source)
vertexId_t maxV=0;
length_t maxLen=0;
for(int v=1; v<nv;v++){
if((off[v+1]-off[v])>maxLen){
maxV=v;
maxLen=off[v+1]-off[v];
}
}
// cout << "Largest vertex is: " << maxV << " With the length of :" << maxLen << endl;
bfsTD bfs;
bfs.Init(custing);
bfs.Reset();
bfs.setInputParameters(maxV);
start_clock(ce_start, ce_stop);
bfs.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of levels : " << bfs.getLevels() << endl;
cout << "The number of elements found : " << bfs.getElementsFound() << endl;
cout << "Total time for BFS - Top-Down : " << totalTime << endl;
bfs.Release();
// bfsBU bfsbu;
// bfsbu.Init(custing);
// bfsbu.Reset();
// bfsbu.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsbu.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsbu.getLevels() << endl;
// cout << "The number of elements found : " << bfsbu.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Bottom-up: " << totalTime << endl;
// bfsbu.Release();
// bfsHybrid bfsHy;
// bfsHy.Init(custing);
// bfsHy.Reset();
// bfsHy.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsHy.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsHy.getLevels() << endl;
// cout << "The number of elements found : " << bfsHy.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Hybrid : " << totalTime << endl;
// bfsHy.Release();
StaticPageRank pr;
pr.Init(custing);
pr.Reset();
pr.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << pr.getIterationCount() << endl;
cout << "Total time for pagerank : " << totalTime << endl;
cout << "Average time per iteartion : " << totalTime/(float)pr.getIterationCount() << endl;
// pr.printRankings(custing);
pr.Release();
StaticPageRank pr2;// =new StaticPageRank();
pr2.Init(custing);
pr2.Reset();
pr2.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << pr2.getIterationCount() << endl;
// cout << "Total time for pagerank : " << totalTime << endl;
// cout << "Average time per iteartion : " << totalTime/(float)pr2.getIterationCount() << endl;
// pr2.printRankings(custing);
pr2.Release();
float *bc = new float[nv];
for (int k = 0; k < nv; k++)
{
bc[k] = 0;
}
StaticBC sbc(bc);
sbc.Init(custing);
sbc.Reset();
start_clock(ce_start, ce_stop);
sbc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "Total time for Static Betweenness Centrality: " << totalTime << endl;
sbc.Reset();
sbc.Release();
delete[] bc;
custing.freecuStinger();
free(off);
free(adj);
return 0;
}
|
f162e77fe18180d5145361d458a39cf260c623d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgemm_reduce.cu, normal z -> d, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_dgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void dgemm_reduce_kernel(
int m, int n, int k,
double alpha,
const double* __restrict__ dA, int lda,
const double* __restrict__ dB, int ldb,
double beta,
double * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ double sum[BLK_K][BLK_M+1][BLK_N+1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_D_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
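// Reduce the BLK_K partial dot products across threadIdx.x in shared memory; the total for this
// output element is left in sum[0][threadIdx.y][threadIdx.z].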
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
/***************************************************************************//**
Purpose
-------
DGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_dgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dB, magma_int_t lddb,
double beta,
magmaDouble_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
| f162e77fe18180d5145361d458a39cf260c623d9.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgemm_reduce.cu, normal z -> d, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_dgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void dgemm_reduce_kernel(
int m, int n, int k,
double alpha,
const double* __restrict__ dA, int lda,
const double* __restrict__ dB, int ldb,
double beta,
double * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ double sum[BLK_K][BLK_M+1][BLK_N+1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_D_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
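// Reduce the BLK_K partial dot products across threadIdx.x in shared memory; the total for this
// output element is left in sum[0][threadIdx.y][threadIdx.z].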
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
/***************************************************************************//**
Purpose
-------
DGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_dgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dB, magma_int_t lddb,
double beta,
magmaDouble_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
|
2e2cba1b29a98173f2fe27a88577d4b0b776edfb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "multiply.h"
__global__ void multiply (float *a, float *b, float *c, int size){
//int globalPos = getGlobalIdx();
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
//printf ("global: %d \n (%d, %d)\n\n", globalPos, row, col);
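// Nested grid-stride style loops over rows and columns. Note that col is indexed from the x launch
// dimensions but strides by blockDim.y * gridDim.y (and row the reverse), so the strides only match
// the index ranges when the x and y launch extents are equal.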
while (row < size) {
while (col < size){
float temp = 0;
for (int i = 0; i < size; i++) {
temp += a[row * size + i] * b[i * size + col];
}
c[row * size + col] = temp;
col+= blockDim.y * gridDim.y;
}
col = blockIdx.x*blockDim.x+threadIdx.x;
row += blockDim.x * gridDim.x;
}
}
// from the striding paper
__device__ int getGlobalIdx(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
| 2e2cba1b29a98173f2fe27a88577d4b0b776edfb.cu | #include <stdio.h>
#include "multiply.h"
__global__ void multiply (float *a, float *b, float *c, int size){
//int globalPos = getGlobalIdx();
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
//printf ("global: %d \n (%d, %d)\n\n", globalPos, row, col);
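// Nested grid-stride style loops over rows and columns. Note that col is indexed from the x launch
// dimensions but strides by blockDim.y * gridDim.y (and row the reverse), so the strides only match
// the index ranges when the x and y launch extents are equal.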
while (row < size) {
while (col < size){
float temp = 0;
for (int i = 0; i < size; i++) {
temp += a[row * size + i] * b[i * size + col];
}
c[row * size + col] = temp;
col+= blockDim.y * gridDim.y;
}
col = blockIdx.x*blockDim.x+threadIdx.x;
row += blockDim.x * gridDim.x;
}
}
// from the striding paper
__device__ int getGlobalIdx(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
|
a1e2c3f191e0b7a44d0ae2eb60e7fdbd671791bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// old op include, fluid should be removed
#ifdef PADDLE_WITH_HIP
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#else
#include <hipcub/hipcub.hpp>
#endif
#include <vector>
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/impl/softmax_kernel_impl.h"
#include "paddle/phi/kernels/margin_cross_entropy_grad_kernel.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T, typename Context>
void GetClassInterval(const gpuStream_t& stream,
const phi::Place& place,
const Context& dev_ctx,
const int rid,
const int rank,
const int nranks,
const int D,
DenseTensor* class_interval) {
std::vector<int> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = D;
if (nranks <= 1) {
paddle::framework::TensorFromVector(shard_dim_vec, dev_ctx, class_interval);
return;
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
DenseTensor num_classes_per_device;
paddle::framework::TensorFromVector(
shard_dim_vec, dev_ctx, &num_classes_per_device);
int* num_classes_per_device_ptr = num_classes_per_device.data<int>();
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
paddle::platform::NCCLCommContext::Instance().Get(rid, place);
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType(
num_classes_per_device.dtype())),
ncclSum,
comm->comm(),
calcu_stream));
}
class_interval->Resize({nranks + 1});
auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval);
size_t cub_temp_storage_bytes = 0;
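// Inclusive prefix sum of the per-rank class counts [0, D_0, ..., D_{nranks-1}], so that
// class_interval[i] holds the first class index owned by rank i and class_interval[nranks] the total.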
hipcub::DeviceScan::InclusiveSum<int*, int*>(
nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream);
auto cub_temp_storage = paddle::memory::Alloc(place, cub_temp_storage_bytes);
hipcub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(),
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
stream);
return;
#endif
}
template <typename T, typename IndexT>
__global__ void CalculateGrad(T* logits_grad,
const T* loss_grad,
const T* logits,
const IndexT* label,
const float margin1,
const float margin2,
const float scale,
const int rank,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
int start_index = class_interval_ptr[rank];
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == label[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) {
MPType dout = static_cast<MPType>(logits_grad[i]);
MPType one = static_cast<MPType>(1.0f);
MPType x = static_cast<MPType>(logits[i]);
MPType m1 = static_cast<MPType>(margin1);
MPType m2 = static_cast<MPType>(margin2);
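// d is the derivative of cos(m1 * acos(x) + m2) with respect to x, i.e. the chain-rule factor of
// the angular margin applied in the forward pass.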
MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x);
logits_grad[i] = static_cast<T>(dout * d);
}
} else {
logits_grad[i] *= loss_grad[row];
}
if (fabs(scale - 1.0) > 1e-8) {
logits_grad[i] *= static_cast<T>(scale);
}
}
}
template <typename T, typename Context>
void MarginCrossEntropyGradKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
const DenseTensor& softmax,
const DenseTensor& loss_grad,
bool return_softmax,
int ring_id,
int rank,
int nranks,
float margin1,
float margin2,
float margin3,
float scale,
DenseTensor* logits_grad) {
const auto softmax_dims = softmax.dims();
const int axis = softmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, softmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, softmax_dims);
if (return_softmax) {
phi::Copy<Context>(
dev_ctx, softmax, dev_ctx.GetPlace(), false, logits_grad);
} else {
logits_grad->ShareDataWith(softmax);
}
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type =
paddle::framework::TransToProtoVarType(label.dtype());
DenseTensor class_interval;
GetClassInterval<T, Context>(dev_ctx.stream(),
dev_ctx.GetPlace(),
dev_ctx,
ring_id,
rank,
nranks,
D,
&class_interval);
if (label_type == paddle::framework::proto::VarType::INT32) {
typedef int32_t LabelT;
hipLaunchKernelGGL(( CalculateGrad<T, LabelT>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
} else if (label_type == paddle::framework::proto::VarType::INT64) {
typedef int64_t LabelT;
hipLaunchKernelGGL(( CalculateGrad<T, LabelT>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
}
}
} // namespace phi
PD_REGISTER_KERNEL(margin_cross_entropy_grad,
GPU,
ALL_LAYOUT,
phi::MarginCrossEntropyGradKernel,
float,
double,
phi::dtype::float16) {}
| a1e2c3f191e0b7a44d0ae2eb60e7fdbd671791bb.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// old op include, fluid should be removed
#ifdef PADDLE_WITH_HIP
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#else
#include <cub/cub.cuh>
#endif
#include <vector>
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/impl/softmax_kernel_impl.h"
#include "paddle/phi/kernels/margin_cross_entropy_grad_kernel.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T, typename Context>
void GetClassInterval(const gpuStream_t& stream,
const phi::Place& place,
const Context& dev_ctx,
const int rid,
const int rank,
const int nranks,
const int D,
DenseTensor* class_interval) {
std::vector<int> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = D;
if (nranks <= 1) {
paddle::framework::TensorFromVector(shard_dim_vec, dev_ctx, class_interval);
return;
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
DenseTensor num_classes_per_device;
paddle::framework::TensorFromVector(
shard_dim_vec, dev_ctx, &num_classes_per_device);
int* num_classes_per_device_ptr = num_classes_per_device.data<int>();
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
paddle::platform::NCCLCommContext::Instance().Get(rid, place);
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType(
num_classes_per_device.dtype())),
ncclSum,
comm->comm(),
calcu_stream));
}
class_interval->Resize({nranks + 1});
auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval);
size_t cub_temp_storage_bytes = 0;
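// Inclusive prefix sum of the per-rank class counts [0, D_0, ..., D_{nranks-1}], so that
// class_interval[i] holds the first class index owned by rank i and class_interval[nranks] the total.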
cub::DeviceScan::InclusiveSum<int*, int*>(
nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream);
auto cub_temp_storage = paddle::memory::Alloc(place, cub_temp_storage_bytes);
cub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(),
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
stream);
return;
#endif
}
template <typename T, typename IndexT>
__global__ void CalculateGrad(T* logits_grad,
const T* loss_grad,
const T* logits,
const IndexT* label,
const float margin1,
const float margin2,
const float scale,
const int rank,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
int start_index = class_interval_ptr[rank];
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == label[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) {
MPType dout = static_cast<MPType>(logits_grad[i]);
MPType one = static_cast<MPType>(1.0f);
MPType x = static_cast<MPType>(logits[i]);
MPType m1 = static_cast<MPType>(margin1);
MPType m2 = static_cast<MPType>(margin2);
MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x);
logits_grad[i] = static_cast<T>(dout * d);
}
} else {
logits_grad[i] *= loss_grad[row];
}
if (fabs(scale - 1.0) > 1e-8) {
logits_grad[i] *= static_cast<T>(scale);
}
}
}
template <typename T, typename Context>
void MarginCrossEntropyGradKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
const DenseTensor& softmax,
const DenseTensor& loss_grad,
bool return_softmax,
int ring_id,
int rank,
int nranks,
float margin1,
float margin2,
float margin3,
float scale,
DenseTensor* logits_grad) {
const auto softmax_dims = softmax.dims();
const int axis = softmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, softmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, softmax_dims);
if (return_softmax) {
phi::Copy<Context>(
dev_ctx, softmax, dev_ctx.GetPlace(), false, logits_grad);
} else {
logits_grad->ShareDataWith(softmax);
}
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type =
paddle::framework::TransToProtoVarType(label.dtype());
DenseTensor class_interval;
GetClassInterval<T, Context>(dev_ctx.stream(),
dev_ctx.GetPlace(),
dev_ctx,
ring_id,
rank,
nranks,
D,
&class_interval);
if (label_type == paddle::framework::proto::VarType::INT32) {
typedef int32_t LabelT;
CalculateGrad<T, LabelT>
<<<blocks, threads, 0, dev_ctx.stream()>>>(logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
} else if (label_type == paddle::framework::proto::VarType::INT64) {
typedef int64_t LabelT;
CalculateGrad<T, LabelT>
<<<blocks, threads, 0, dev_ctx.stream()>>>(logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
}
}
} // namespace phi
PD_REGISTER_KERNEL(margin_cross_entropy_grad,
GPU,
ALL_LAYOUT,
phi::MarginCrossEntropyGradKernel,
float,
double,
phi::dtype::float16) {}
|
e5f67217d28cd4d7ff9bd4cb3e82d1a0688ed5bc.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 24
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
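// PowerKernal is a power/stress kernel: each thread repeatedly stores to strided global-memory
// locations, and the runtime-supplied `iterations` argument multiplies the fixed inner
// ITERATIONS loop to control how long the GPU is kept busy.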
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int m_sum=0;
for (unsigned j=0; j<iterations; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an array with pseudo-random integer entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand();
}
| e5f67217d28cd4d7ff9bd4cb3e82d1a0688ed5bc.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 24
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int m_sum=0;
for (unsigned j=0; j<iterations; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an array with pseudo-random integer entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand();
}
|
feca49fd8f4e5c85075ab04f4f8d3b48eb79e458.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereoLite.h"
/// image to downscale
texture<float, hipTextureType2D, hipReadModeElementType> texFine;
texture<float2, hipTextureType2D, hipReadModeElementType> texFineFloat2;
// *********************************
// Downscaling
// *********************************
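// The downscale kernels read the source through a normalized, bilinearly filtered texture;
// averaging four taps offset by a quarter of a destination texel approximates a small box
// filter over the source footprint (roughly 2x2 when halving the resolution).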
__global__ void LiteDownscaleKernel(int width, int height, int stride, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = 0.25f * (tex2D(texFine, x - dx * 0.25f, y) + tex2D(texFine, x + dx * 0.25f, y) +
tex2D(texFine, x, y - dy * 0.25f) + tex2D(texFine, x, y + dy * 0.25f));
}
void StereoLite::Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = hipAddressModeMirror;
texFine.addressMode[1] = hipAddressModeMirror;
texFine.filterMode = hipFilterModeLinear;
texFine.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
checkCudaErrors(hipBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, out);
}
__global__ void LiteDownscaleNearestNeighborKernel(int width, int height, int stride, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = tex2D(texFine, x, y);
/*out[pos] = 0.25f * (tex2D(texFine, x - dx * 0.25f, y) + tex2D(texFine, x + dx * 0.25f, y) +
tex2D(texFine, x, y - dy * 0.25f) + tex2D(texFine, x, y + dy * 0.25f));*/
}
void StereoLite::DownscaleNearestNeighbor(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = hipAddressModeMirror;
texFine.addressMode[1] = hipAddressModeMirror;
texFine.filterMode = hipFilterModePoint;
texFine.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
checkCudaErrors(hipBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleNearestNeighborKernel << < blocks, threads >> > (newWidth, newHeight, newStride, out);
}
__global__ void LiteDownscaleScalingNNKernel(int width, int height, int stride, float scale, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = tex2D(texFine, x, y) / scale;
}
void StereoLite::DownscaleNearestNeighbor(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = hipAddressModeMirror;
texFine.addressMode[1] = hipAddressModeMirror;
texFine.filterMode = hipFilterModePoint;
texFine.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
checkCudaErrors(hipBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleScalingNNKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out);
}
// *********************************
// Downscaling for Float2
// *********************************
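// Same scheme for two-channel data (e.g. a flow/disparity field stored as float2):
// the x and y components are filtered independently.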
__global__ void LiteDownscaleKernel(int width, int height, int stride, float2 *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
float2 val00 = tex2D(texFineFloat2, x - dx * 0.25f, y);
float2 val01 = tex2D(texFineFloat2, x + dx * 0.25f, y);
float2 val10 = tex2D(texFineFloat2, x, y - dy * 0.25f);
float2 val11 = tex2D(texFineFloat2, x, y + dy * 0.25f);
out[pos].x = 0.25f * (val00.x + val01.x + val10.x + val11.x);
out[pos].y = 0.25f * (val00.y + val01.y + val10.y + val11.y);
}
void StereoLite::Downscale(const float2 *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float2 *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFineFloat2.addressMode[0] = hipAddressModeMirror;
texFineFloat2.addressMode[1] = hipAddressModeMirror;
texFineFloat2.filterMode = hipFilterModeLinear;
texFineFloat2.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float2>();
checkCudaErrors(hipBindTexture2D(0, texFineFloat2, src, width, height, stride * sizeof(float2)));
LiteDownscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, out);
}
// ***********************************
// Downscale with vector downscaling
//************************************
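// These variants additionally multiply the filtered result by `scale`, which is handy when the
// stored values are resolution-dependent (e.g. pixel displacements that shrink with the image).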
__global__ void LiteDownscaleScalingKernel(int width, int height, int stride, float scale, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = scale * 0.25f * (tex2D(texFine, x - dx * 0.25f, y) + tex2D(texFine, x + dx * 0.25f, y) +
tex2D(texFine, x, y - dy * 0.25f) + tex2D(texFine, x, y + dy * 0.25f));
}
void StereoLite::Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = hipAddressModeMirror;
texFine.addressMode[1] = hipAddressModeMirror;
texFine.filterMode = hipFilterModeLinear;
texFine.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
checkCudaErrors(hipBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleScalingKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out);
}
// ***********************************
// Downscale with vector downscaling for Float2
//************************************
__global__ void LiteDownscaleScalingKernel(int width, int height, int stride, float scale, float2 *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
float2 val00 = tex2D(texFineFloat2, x - dx * 0.25f, y);
float2 val01 = tex2D(texFineFloat2, x + dx * 0.25f, y);
float2 val10 = tex2D(texFineFloat2, x, y - dy * 0.25f);
float2 val11 = tex2D(texFineFloat2, x, y + dy * 0.25f);
out[pos].x = scale * 0.25f * (val00.x + val01.x + val10.x + val11.x);
out[pos].y = scale * 0.25f * (val00.y + val01.y + val10.y + val11.y);
}
void StereoLite::Downscale(const float2 *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale, float2 *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFineFloat2.addressMode[0] = hipAddressModeMirror;
texFineFloat2.addressMode[1] = hipAddressModeMirror;
texFineFloat2.filterMode = hipFilterModeLinear;
texFineFloat2.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float2>();
checkCudaErrors(hipBindTexture2D(0, texFineFloat2, src, width, height, stride * sizeof(float2)));
LiteDownscaleScalingKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out);
} | feca49fd8f4e5c85075ab04f4f8d3b48eb79e458.cu | #include "stereoLite.h"
/// image to downscale
texture<float, cudaTextureType2D, cudaReadModeElementType> texFine;
texture<float2, cudaTextureType2D, cudaReadModeElementType> texFineFloat2;
// *********************************
// Downscaling
// *********************************
__global__ void LiteDownscaleKernel(int width, int height, int stride, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = 0.25f * (tex2D(texFine, x - dx * 0.25f, y) + tex2D(texFine, x + dx * 0.25f, y) +
tex2D(texFine, x, y - dy * 0.25f) + tex2D(texFine, x, y + dy * 0.25f));
}
void StereoLite::Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = cudaAddressModeMirror;
texFine.addressMode[1] = cudaAddressModeMirror;
texFine.filterMode = cudaFilterModeLinear;
texFine.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
checkCudaErrors(cudaBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, out);
}
__global__ void LiteDownscaleNearestNeighborKernel(int width, int height, int stride, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = tex2D(texFine, x, y);
/*out[pos] = 0.25f * (tex2D(texFine, x - dx * 0.25f, y) + tex2D(texFine, x + dx * 0.25f, y) +
tex2D(texFine, x, y - dy * 0.25f) + tex2D(texFine, x, y + dy * 0.25f));*/
}
void StereoLite::DownscaleNearestNeighbor(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = cudaAddressModeMirror;
texFine.addressMode[1] = cudaAddressModeMirror;
texFine.filterMode = cudaFilterModePoint;
texFine.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
checkCudaErrors(cudaBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleNearestNeighborKernel << < blocks, threads >> > (newWidth, newHeight, newStride, out);
}
__global__ void LiteDownscaleScalingNNKernel(int width, int height, int stride, float scale, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = tex2D(texFine, x, y) / scale;
}
void StereoLite::DownscaleNearestNeighbor(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = cudaAddressModeMirror;
texFine.addressMode[1] = cudaAddressModeMirror;
texFine.filterMode = cudaFilterModePoint;
texFine.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
checkCudaErrors(cudaBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleScalingNNKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out);
}
// *********************************
// Downscaling for Float2
// *********************************
__global__ void LiteDownscaleKernel(int width, int height, int stride, float2 *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
float2 val00 = tex2D(texFineFloat2, x - dx * 0.25f, y);
float2 val01 = tex2D(texFineFloat2, x + dx * 0.25f, y);
float2 val10 = tex2D(texFineFloat2, x, y - dy * 0.25f);
float2 val11 = tex2D(texFineFloat2, x, y + dy * 0.25f);
out[pos].x = 0.25f * (val00.x + val01.x + val10.x + val11.x);
out[pos].y = 0.25f * (val00.y + val01.y + val10.y + val11.y);
}
void StereoLite::Downscale(const float2 *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float2 *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFineFloat2.addressMode[0] = cudaAddressModeMirror;
texFineFloat2.addressMode[1] = cudaAddressModeMirror;
texFineFloat2.filterMode = cudaFilterModeLinear;
texFineFloat2.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>();
checkCudaErrors(cudaBindTexture2D(0, texFineFloat2, src, width, height, stride * sizeof(float2)));
LiteDownscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, out);
}
// ***********************************
// Downscale with vector downscaling
//************************************
__global__ void LiteDownscaleScalingKernel(int width, int height, int stride, float scale, float *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
out[pos] = scale * 0.25f * (tex2D(texFine, x - dx * 0.25f, y) + tex2D(texFine, x + dx * 0.25f, y) +
tex2D(texFine, x, y - dy * 0.25f) + tex2D(texFine, x, y + dy * 0.25f));
}
void StereoLite::Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale, float *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFine.addressMode[0] = cudaAddressModeMirror;
texFine.addressMode[1] = cudaAddressModeMirror;
texFine.filterMode = cudaFilterModeLinear;
texFine.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
checkCudaErrors(cudaBindTexture2D(0, texFine, src, width, height, stride * sizeof(float)));
LiteDownscaleScalingKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out);
}
// ***********************************
// Downscale with vector downscaling for Float2
//************************************
__global__ void LiteDownscaleScalingKernel(int width, int height, int stride, float scale, float2 *out)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height)
{
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
int pos = ix + iy * stride;
float2 val00 = tex2D(texFineFloat2, x - dx * 0.25f, y);
float2 val01 = tex2D(texFineFloat2, x + dx * 0.25f, y);
float2 val10 = tex2D(texFineFloat2, x, y - dy * 0.25f);
float2 val11 = tex2D(texFineFloat2, x, y + dy * 0.25f);
out[pos].x = scale * 0.25f * (val00.x + val01.x + val10.x + val11.x);
out[pos].y = scale * 0.25f * (val00.y + val01.y + val10.y + val11.y);
}
void StereoLite::Downscale(const float2 *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale, float2 *out)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y));
// mirror if a coordinate value is out-of-range
texFineFloat2.addressMode[0] = cudaAddressModeMirror;
texFineFloat2.addressMode[1] = cudaAddressModeMirror;
texFineFloat2.filterMode = cudaFilterModeLinear;
texFineFloat2.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>();
checkCudaErrors(cudaBindTexture2D(0, texFineFloat2, src, width, height, stride * sizeof(float2)));
LiteDownscaleScalingKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out);
} |
dc17b05e7ea5fcfa4eb7421cec16a8ec3fc16acf.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// #include "../kernelHandler.h"
#include "helpers.h"
#include "imageConvolutionSerial.h"
#include "imageConvolutionParallel.h"
#include "imageConvolutionParallelSharedMemory.h"
#include "imageConvolutionParallelConstantMemory.h"
#include "imageConvolutionParallelSharedConstantMemory.h"
#include "imageConvolutionTextureMemory.h"
#include "imageConvolution2DTextureMemory.h"
#include "imageConvolutionParallelSharedMemoryNoOverlap.h"
#include "imageConvolutionParallelSharedConstantMemoryNoOverlap.h"
const char *imageFilename = "res//images//1024_lena_bw.pgm";
//const char *imageFilename = "galaxy.ascii.pgm";
#define ITERATIONS 100
#define BLOCK_WIDTH 24
#define FILE_INDEX 3
float *imageConvolutionParallel(const char *imageFilename, char **argv, int option, bool print_save = true)
{
// load image from disk
float *hData = NULL;
char buf[512];
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
float *results;
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
if (print_save)
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
FILE *fp = fopen("kernels.txt", "r");
if (fp == NULL)
{
perror("Error in opening file");
exit(EXIT_FAILURE);
}
int numOfKernels;
fgets(buf, sizeof(buf), fp);
sscanf(buf, "%d", &numOfKernels);
// printf("%d kernel to be loaded\n", numOfKernels);
kernel **kernels = loadAllKernels(fp, numOfKernels);
// printKernels(kernels, numOfKernels);
// kernelHandler kh = kernelHandler("../kernels.txt");
//printf("Kernels loaded\n");
results = (float *)malloc(numOfKernels * sizeof(float));
//Get Kernels
// FILE *fp = fopen("kernels.txt", "r");
// if (fp == NULL)
// {
// perror("Error in opening file");
// exit(EXIT_FAILURE);
// }
for (int i = 0; i < numOfKernels; i++)
{
char outputFilename[1024];
char file_name[256];
float *result;
float totalTime = 0.0;
if (print_save)
printf("\n\n\nKernel Dimension : %dx%d\n", kernels[i]->dimension, kernels[i]->dimension);
int j = 0;
for (; j < ITERATIONS; j++)
{
hipDeviceReset();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
switch (option)
{
case 1:
result = applyKernelToImageSerial(hData, width, height, *kernels[i], imagePath);
if (j == 0)
sprintf(file_name, "_%dx%d_serial_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 2:
result = applyKernelToImageParallelNaive(hData, width, height, kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 3:
result = applyKernelToImageParallelSharedMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 4:
result = applyKernelToImageParallelConstantMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_constant_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 5:
result = applyKernelToImageParallelSharedConstantMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_constant_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 6:
result = applyKernelToImageParallelTextureMomory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_texture_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 7:
result = applyKernelToImageParallel2DTextureMomory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_paralled_2D_texture_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 8:
result = applyKernelToImageParallelSharedMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH + kernels[i]->dimension - 1);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_mod_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 9:
result = applyKernelToImageParallelSharedConstantMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH + kernels[i]->dimension - 1);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_constant_mod_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 10:
result = applyKernelToImageParallelSharedMemoryNoOverlap(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_no_overlap_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 11:
result = applyKernelToImageParallelSharedConstantMemoryNoOverlap(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_constant_no_overlap_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
default:
printf("Incorrect input \n");
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
if (print_save)
printf("[%3d] Time : %f \n", j, milliseconds);
totalTime += milliseconds;
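// Stop early once this kernel size has consumed roughly 5 minutes (300000 ms) of GPU time.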
if (totalTime > 300000)
{
j++;
printf("Completed %d iteration.... exiting\n", j);
break;
}
if (j != ITERATIONS - 1)
free(result);
}
if (print_save)
printf("%d | %f \n", kernels[i]->dimension, totalTime / j);
results[i] = totalTime / j;
if (print_save)
{
for (int j = 0; j < height; j++)
{
printf("[%3d] : ", j);
for (int i = 0; i < width; i++)
{
printf(" |%5.2f|", result[j * width + i]);
}
printf("\n");
}
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, file_name);
printf("Saving to %s", outputFilename);
sdkSavePGM(outputFilename, result, width, height);
free(result);
}
}
freeKernels(kernels, numOfKernels);
free(hData);
return results;
}
int main(int argc, char **argv)
{
printf("Image convolution project \n");
printf("Please select an option \n");
printf("1 - Serial Implementation \n");
printf("2 - Naive parallel implementation \n");
printf("3 - Shared memory implementation \n");
printf("4 - Constant memory implementation \n");
printf("5 - Shared Constant memory implementation \n");
printf("6 - Texture memory implementation \n");
printf("7 - 2D Texture memory implementation \n");
printf("8 - Shared memory modified implementation \n");
printf("9 - Shared + constant memory modified implementation \n");
printf("10 - Shared memory No Overlap implementation \n");
printf("11 - Shared Constant memory No Overlap implementation \n");
printf("12 - All \n ");
int option;
char buf[512];
scanf("%d", &option);
// kernelHandler kh = kernelHandler("../kernels.txt");
//printf("Kernels loaded\n");
char image_files[5][35] = {"res//images//256_lena_bw.pgm", "res//images//lena_bw.pgm", "res//images//1024_lena_bw.pgm", "res//images//2048_lena_bw.pgm", "res//images//4096_lena_bw.pgm"};
if (option < 12)
imageConvolutionParallel(image_files[FILE_INDEX], argv, option);
else if (option == 12)
{
for (int k = 4; k < 5; k++)
{
FILE *fp = fopen("kernels.txt", "r");
if (fp == NULL)
{
perror("Error in opening file");
exit(EXIT_FAILURE);
}
int numOfKernels;
fgets(buf, sizeof(buf), fp);
sscanf(buf, "%d", &numOfKernels);
kernel **kernels = loadAllKernels(fp, numOfKernels);
float *results;
for (int i = 1; i < 12; i++)
{
if (i > 9)
{
results = imageConvolutionParallel(image_files[k], argv, i, false);
for (int j = 0; j < numOfKernels; j++)
{
printf("\n%8.3f", results[j]);
}
printf("\n");
}
printf("Image %d : Type %d DONE\n", k, i);
}
// printf("Image : %s\n", image_files[k]);
// printf("| MxM | Serial |Parallel| Shared |Constant| SC | Text | 2DText |\n");
// for (int i = 0; i < numOfKernels; i++)
// {
// printf("|%2dx%2d|", kernels[i]->dimension, kernels[i]->dimension);
// for (int j = 1; j < 12; j++)
// {
// if (i != 8 && i != 9)
// printf("%8.3f|", results[j - 1][i]);
// }
// printf("\n");
// }
printf("=================================\n\n\n");
}
}
else
printf("\n\nInvalid Input !!!");
// switch (option)
// {
// case 1:
// imageConvolutionSerial(imageFilename, argv);
// break;
// case 2:
// imageConvolutionParallel(imageFilename, argv);
// break;
// case 3:
// imageConvolutionParallelSharedMemory(imageFilename, argv);
// break;
// case 4:
// imageConvolutionParallelConstantMemory(imageFilename, argv);
// break;
// case 5:
// imageConvolutionParallelSharedConstantMemory(imageFilename, argv);
// break;
// case 6:
// imageConvolutionParallelTextureMomory(imageFilename, argv);
// break;
// default:
// printf("Incorrect input \n");
// }
return 0;
}
| dc17b05e7ea5fcfa4eb7421cec16a8ec3fc16acf.cu | #include <iostream>
#include <stdio.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// #include "../kernelHandler.h"
#include "helpers.h"
#include "imageConvolutionSerial.h"
#include "imageConvolutionParallel.h"
#include "imageConvolutionParallelSharedMemory.h"
#include "imageConvolutionParallelConstantMemory.h"
#include "imageConvolutionParallelSharedConstantMemory.h"
#include "imageConvolutionTextureMemory.h"
#include "imageConvolution2DTextureMemory.h"
#include "imageConvolutionParallelSharedMemoryNoOverlap.h"
#include "imageConvolutionParallelSharedConstantMemoryNoOverlap.h"
const char *imageFilename = "res//images//1024_lena_bw.pgm";
//const char *imageFilename = "galaxy.ascii.pgm";
#define ITERATIONS 100
#define BLOCK_WIDTH 24
#define FILE_INDEX 3
float *imageConvolutionParallel(const char *imageFilename, char **argv, int option, bool print_save = true)
{
// load image from disk
float *hData = NULL;
char buf[512];
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
float *results;
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
if (print_save)
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
FILE *fp = fopen("kernels.txt", "r");
if (fp == NULL)
{
perror("Error in opening file");
exit(EXIT_FAILURE);
}
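// The first line of kernels.txt holds the number of convolution kernels; loadAllKernels then
// reads that many kernel definitions from the rest of the file.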
int numOfKernels;
fgets(buf, sizeof(buf), fp);
sscanf(buf, "%d", &numOfKernels);
// printf("%d kernel to be loaded\n", numOfKernels);
kernel **kernels = loadAllKernels(fp, numOfKernels);
// printKernels(kernels, numOfKernels);
// kernelHandler kh = kernelHandler("../kernels.txt");
//printf("Kernels loaded\n");
results = (float *)malloc(numOfKernels * sizeof(float));
//Get Kernels
// FILE *fp = fopen("kernels.txt", "r");
// if (fp == NULL)
// {
// perror("Error in opening file");
// exit(EXIT_FAILURE);
// }
for (int i = 0; i < numOfKernels; i++)
{
char outputFilename[1024];
char file_name[256];
float *result;
float totalTime = 0.0;
if (print_save)
printf("\n\n\nKernel Dimension : %dx%d\n", kernels[i]->dimension, kernels[i]->dimension);
int j = 0;
for (; j < ITERATIONS; j++)
{
cudaDeviceReset();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
switch (option)
{
case 1:
result = applyKernelToImageSerial(hData, width, height, *kernels[i], imagePath);
if (j == 0)
sprintf(file_name, "_%dx%d_serial_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 2:
result = applyKernelToImageParallelNaive(hData, width, height, kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 3:
result = applyKernelToImageParallelSharedMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 4:
result = applyKernelToImageParallelConstantMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_constant_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 5:
result = applyKernelToImageParallelSharedConstantMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_constant_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 6:
result = applyKernelToImageParallelTextureMomory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_texture_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 7:
result = applyKernelToImageParallel2DTextureMomory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_paralled_2D_texture_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 8:
result = applyKernelToImageParallelSharedMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH + kernels[i]->dimension - 1);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_mod_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 9:
result = applyKernelToImageParallelSharedConstantMemory(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH + kernels[i]->dimension - 1);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_constant_mod_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 10:
result = applyKernelToImageParallelSharedMemoryNoOverlap(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_no_overlap_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
case 11:
result = applyKernelToImageParallelSharedConstantMemoryNoOverlap(hData, width, height, *kernels[i], imagePath, BLOCK_WIDTH);
if (j == 0)
sprintf(file_name, "_%dx%d_parallel_shared_constant_no_overlap_out.pgm", kernels[i]->dimension, kernels[i]->dimension);
break;
default:
printf("Incorrect input \n");
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
if (print_save)
printf("[%3d] Time : %f \n", j, milliseconds);
totalTime += milliseconds;
if (totalTime > 300000)
{
j++;
printf("Completed %d iteration.... exiting\n", j);
break;
}
if (j != ITERATIONS - 1)
free(result);
}
if (print_save)
printf("%d | %f \n", kernels[i]->dimension, totalTime / j);
results[i] = totalTime / j;
if (print_save)
{
for (int j = 0; j < height; j++)
{
printf("[%3d] : ", j);
for (int i = 0; i < width; i++)
{
printf(" |%5.2f|", result[j * width + i]);
}
printf("\n");
}
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, file_name);
printf("Saving to %s", outputFilename);
sdkSavePGM(outputFilename, result, width, height);
free(result);
}
}
freeKernels(kernels, numOfKernels);
free(hData);
return results;
}
int main(int argc, char **argv)
{
printf("Image convolution project \n");
printf("Please select an option \n");
printf("1 - Serial Implementation \n");
printf("2 - Naive parallel implementation \n");
printf("3 - Shared memory implementation \n");
printf("4 - Constant memory implementation \n");
printf("5 - Shared Constant memory implementation \n");
printf("6 - Texture memory implementation \n");
printf("7 - 2D Texture memory implementation \n");
printf("8 - Shared memory modified implementation \n");
printf("9 - Shared + constant memory modified implementation \n");
printf("10 - Shared memory No Overlap implementation \n");
printf("11 - Shared Constant memory No Overlap implementation \n");
printf("12 - All \n ");
int option;
char buf[512];
scanf("%d", &option);
// kernelHandler kh = kernelHandler("../kernels.txt");
//printf("Kernels loaded\n");
char image_files[5][35] = {"res//images//256_lena_bw.pgm", "res//images//lena_bw.pgm", "res//images//1024_lena_bw.pgm", "res//images//2048_lena_bw.pgm", "res//images//4096_lena_bw.pgm"};
if (option < 12)
imageConvolutionParallel(image_files[FILE_INDEX], argv, option);
else if (option == 12)
{
for (int k = 4; k < 5; k++)
{
FILE *fp = fopen("kernels.txt", "r");
if (fp == NULL)
{
perror("Error in opening file");
exit(EXIT_FAILURE);
}
int numOfKernels;
fgets(buf, sizeof(buf), fp);
sscanf(buf, "%d", &numOfKernels);
kernel **kernels = loadAllKernels(fp, numOfKernels);
float *results;
for (int i = 1; i < 12; i++)
{
if (i > 9)
{
results = imageConvolutionParallel(image_files[k], argv, i, false);
for (int j = 0; j < numOfKernels; j++)
{
printf("\n%8.3f", results[j]);
}
printf("\n");
}
printf("Image %d : Type %d DONE\n", k, i);
}
// printf("Image : %s\n", image_files[k]);
// printf("| MxM | Serial |Parallel| Shared |Constant| SC | Text | 2DText |\n");
// for (int i = 0; i < numOfKernels; i++)
// {
// printf("|%2dx%2d|", kernels[i]->dimension, kernels[i]->dimension);
// for (int j = 1; j < 12; j++)
// {
// if (i != 8 && i != 9)
// printf("%8.3f|", results[j - 1][i]);
// }
// printf("\n");
// }
printf("=================================\n\n\n");
}
}
else
printf("\n\nInvalid Input !!!");
// switch (option)
// {
// case 1:
// imageConvolutionSerial(imageFilename, argv);
// break;
// case 2:
// imageConvolutionParallel(imageFilename, argv);
// break;
// case 3:
// imageConvolutionParallelSharedMemory(imageFilename, argv);
// break;
// case 4:
// imageConvolutionParallelConstantMemory(imageFilename, argv);
// break;
// case 5:
// imageConvolutionParallelSharedConstantMemory(imageFilename, argv);
// break;
// case 6:
// imageConvolutionParallelTextureMomory(imageFilename, argv);
// break;
// default:
// printf("Incorrect input \n");
// }
return 0;
}
|
2d3b0f6f0f6767c8dc6732247f3b84e42daceb81.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/jit_macros.h>
#if AT_USE_JITERATOR()
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/jiterator.h>
#include <ATen/hip/jiterator_impl.h>
#include <iostream>
#include <utility>
#include <chrono>
namespace at {
namespace native {
static inline void launch_jitted_vectorized_kernel_dynamic(
const std::string& name, TensorIteratorBase& iter,
DeviceIndex dev_idx, int64_t N, const std::string& f, void* data_ptr,
const c10::SmallVector<at::Scalar>& extra_args, bool return_by_ref) {
TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
// N is still int64_t for the computation, but it's always safe to cast result to int
const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
const int vec_size = jitted_can_vectorize_up_to(iter);
bool vectorized = vec_size > 1;
// Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
// fn_ptr is set to the appropriate function based on the vec size and GPU used
// TODO: Memory use can probably be optimized by re-using kernels across GPUs with
// the same compute capability
int nInputs = iter.ninputs();
int nOutputs = iter.noutputs();
const at::ScalarType common_dtype = iter.common_dtype();
std::string f_inputs_type_str = at::cuda::jit::typeName(common_dtype);
std::string compute_type_str = at::cuda::jit::typeName(toOpMathType(common_dtype));
std::string result_type_str = at::cuda::jit::typeName(common_dtype);
c10::SmallVector<std::string> extra_args_types = get_extra_args_typenames(extra_args);
// The cache key includes all the parameters to generate_code + vec_size + dev_idx
std::stringstream ss;
ss << nInputs << "_" << nOutputs << f;
ss << f_inputs_type_str << compute_type_str << result_type_str;
ss << static_cast<int>(at::cuda::jit::BinaryFuncVariant::NoScalar);
ss << extra_args_types;
ss << vec_size;
// DeviceIndex, e.g. int8_t, is not treated as a number by the stream, cast to int as a workaround
ss << static_cast<int>(dev_idx);
const std::string cache_key = ss.str();
static std::mutex _jiterator_mutex;
static std::unordered_map<std::string, at::cuda::jit::NvrtcFunction> fns;
at::cuda::jit::NvrtcFunction* fn_ptr = &fns[cache_key];
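// Double-checked locking: the unlocked test lets the common cache-hit path skip the mutex,
// and the locked re-test guarantees only one thread compiles a kernel for a given cache key.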
if (!fn_ptr->function) {
const std::lock_guard<std::mutex> lock{_jiterator_mutex};
if (!fn_ptr->function) { // cache miss!
// Generates program
auto code = at::cuda::jit::generate_code(nInputs, nOutputs, f, name,
f_inputs_type_str, compute_type_str, result_type_str,
/*contiguous=*/true, /*dynamic_casting=*/false,
at::cuda::jit::BinaryFuncVariant::NoScalar,
extra_args_types,
vectorized, vec_size,
return_by_ref);
std::string kernel_name = vectorized ? name + "_vectorized" + std::to_string(vec_size) : name;
// Acquires the program
*fn_ptr = at::cuda::jit::jit_pwise_function(code, kernel_name);
}
}
// size of `extra_args` is unknown at compile-time
auto extra_args_size = extra_args.size();
float scalar_val = 0;
if (vectorized) {
// pack args for kernel launch
constexpr int kernel_args = 3;
auto args = std::make_unique<void*[]>(kernel_args + extra_args_size);
args[0] = static_cast<void*>(&N);
args[1] = data_ptr;
args[2] = static_cast<void*>(&scalar_val);
for (const auto i : c10::irange(extra_args_size)) {
// since 3 slots are already filled in `args`
args[i + 3] = const_cast<void*>(extra_args[i].data_ptr());
}
at::cuda::jit::launch_jitted_pwise_function(*fn_ptr, args.get(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
} else {
TrivialOffsetCalculatorVariant input_offset_calculator(iter.ninputs());
void* ic_ptr = input_offset_calculator.data_ptr();
TrivialOffsetCalculatorVariant output_offset_calculator(iter.noutputs());
void* oc_ptr = output_offset_calculator.data_ptr();
auto l = memory::LoadWithoutCast();
auto s = memory::StoreWithoutCast();
// pack args for kernel launch
constexpr int kernel_args = 7;
auto args = std::make_unique<void*[]>(kernel_args + extra_args_size);
args[0] = static_cast<void*>(&N);
args[1] = data_ptr;
args[2] = ic_ptr;
args[3] = oc_ptr;
args[4] = static_cast<void*>(&l);
args[5] = static_cast<void*>(&s);
args[6] = static_cast<void*>(&scalar_val);
for (const auto i : c10::irange(extra_args_size)) {
// since 7 slots are already filled in `args`
args[i + 7] = const_cast<void*>(extra_args[i].data_ptr());
}
at::cuda::jit::launch_jitted_pwise_function(*fn_ptr, args.get(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
}
}
static inline void launch_jitted_unrolled_kernel_dynamic(
const std::string& name, TensorIteratorBase& iter,
DeviceIndex dev_idx, int64_t N, const std::string& f, void* data_ptr,
void* ic_ptr, void* oc_ptr, void* l_ptr, void* s_ptr, bool contiguous, bool dynamic_casting,
const c10::SmallVector<at::Scalar>& extra_args, bool return_by_ref) {
TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
//casting result to int is always safe, intermediate is int64 and won't overflow
const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
int nInputs = iter.ninputs();
int nOutputs = iter.noutputs();
const at::ScalarType common_dtype = iter.common_dtype();
std::string f_inputs_type_str = at::cuda::jit::typeName(common_dtype);
std::string compute_type_str = at::cuda::jit::typeName(toOpMathType(common_dtype));
std::string result_type_str = at::cuda::jit::typeName(common_dtype);
c10::SmallVector<std::string> extra_args_types = get_extra_args_typenames(extra_args);
// The cache key includes all the parameters to generate_code + dev_idx
std::stringstream ss;
ss << nInputs << "_" << nOutputs << f;
ss << f_inputs_type_str << compute_type_str << result_type_str;
ss << contiguous << dynamic_casting;
ss << static_cast<int>(at::cuda::jit::BinaryFuncVariant::NoScalar);
ss << extra_args_types;
ss << dev_idx;
const std::string cache_key = ss.str();
static std::mutex _jiterator_mutex;
static std::unordered_map<std::string, at::cuda::jit::NvrtcFunction> fns;
at::cuda::jit::NvrtcFunction* fn_ptr = &fns[cache_key];
if (!fn_ptr->function) {
const std::lock_guard<std::mutex> lock{_jiterator_mutex};
if (!fn_ptr->function) {
auto code = at::cuda::jit::generate_code(nInputs, nOutputs, f, name,
f_inputs_type_str, compute_type_str, result_type_str,
contiguous, dynamic_casting,
at::cuda::jit::BinaryFuncVariant::NoScalar,
extra_args_types, /*vectorized*/false, /*vec_size*/0, return_by_ref);
*fn_ptr = at::cuda::jit::jit_pwise_function(code, name);
}
}
float scalar_val = 0;
// pack args for kernel launch
constexpr int kernel_args = 7;
auto extra_args_size = extra_args.size();
auto args = std::make_unique<void*[]>(kernel_args + extra_args_size);
args[0] = static_cast<void*>(&N);
args[1] = data_ptr;
args[2] = ic_ptr;
args[3] = oc_ptr;
args[4] = l_ptr;
args[5] = s_ptr;
args[6] = static_cast<void*>(&scalar_val);
for (const auto i : c10::irange(extra_args_size)) {
// since 7 slots are already filled in `args`
args[i + 7] = const_cast<void*>(extra_args[i].data_ptr());
}
at::cuda::jit::launch_jitted_pwise_function(*fn_ptr, args.get(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
}
void jitted_gpu_kernel_dynamic_impl(
const std::string& kernel_name,
TensorIteratorBase& iter,
const std::string& f,
const bool dynamic_casting,
const c10::SmallVector<at::Scalar>& extra_args,
bool return_by_ref) {
TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
TORCH_INTERNAL_ASSERT(iter.noutputs() <= 8);
TORCH_INTERNAL_ASSERT(iter.ninputs() <= 8);
ArrayVariant data(iter);
void* data_ptr = data.data_ptr();
int64_t numel = iter.numel();
bool contiguous = iter.is_contiguous();
// Decides which of 4 kernel types to launch
// Variations are:
// - Case 1: no dynamic casting and contiguous
// - Case 2: no dynamic casting and noncontiguous
// - Case 3: dynamic casting and contiguous
// - Case 4: dynamic casting and noncontiguous
// These cases align with the non-jitted CUDALoops.cuh cases in gpu_kernel_impl
if (!dynamic_casting) {
if (contiguous) {
// Case 1: no dynamic casting and contiguous
launch_jitted_vectorized_kernel_dynamic(kernel_name, iter,
iter.device().index(), numel, f, data_ptr, extra_args, return_by_ref);
return;
}
// Case 2: no dynamic casting and noncontiguous
OffsetCalculatorVariant</*is_input=*/true> input_offset_calculator(iter);
void* ic_ptr = input_offset_calculator.data_ptr();
OffsetCalculatorVariant</*is_input=*/false> output_offset_calculator(iter);
void* oc_ptr = output_offset_calculator.data_ptr();
auto loader = memory::LoadWithoutCast();
auto storer = memory::StoreWithoutCast();
void* l_ptr = static_cast<void*>(&loader);
void* s_ptr = static_cast<void*>(&storer);
launch_jitted_unrolled_kernel_dynamic(
kernel_name, iter, iter.device().index(), numel, f, data_ptr,
ic_ptr, oc_ptr, l_ptr, s_ptr, contiguous, dynamic_casting, extra_args, return_by_ref);
return;
}
// Cases 3 and 4 are handled below
// Both require construction of one or more storers and loaders
// Creates load casts from inputs (note offset indexing into the iterator's noutputs...ntensors-1 tensors)
LoadWithCastVariant loader(iter);
void* l_ptr = loader.data_ptr();
// Creates store cast to output (the 0...noutputs-1 tensors in TensorIterator)
StoreWithCastVariant storer(iter);
void* s_ptr = storer.data_ptr();
if (contiguous) {
// Case 3: dynamic casting and contiguous
TrivialOffsetCalculatorVariant input_offset_calculator(iter.ninputs());
void* ic_ptr = input_offset_calculator.data_ptr();
TrivialOffsetCalculatorVariant output_offset_calculator(iter.noutputs());
void* oc_ptr = output_offset_calculator.data_ptr();
launch_jitted_unrolled_kernel_dynamic(
kernel_name, iter, iter.device().index(), numel, f, data_ptr,
ic_ptr, oc_ptr, l_ptr, s_ptr, contiguous, dynamic_casting, extra_args, return_by_ref);
return;
}
// Case 4: dynamic casting and noncontiguous
OffsetCalculatorVariant</*is_input=*/true> input_offset_calculator(iter);
void* ic_ptr = input_offset_calculator.data_ptr();
OffsetCalculatorVariant</*is_input=*/false> output_offset_calculator(iter);
void* oc_ptr = output_offset_calculator.data_ptr();
launch_jitted_unrolled_kernel_dynamic(
kernel_name, iter, iter.device().index(), numel, f, data_ptr,
ic_ptr, oc_ptr, l_ptr, s_ptr, contiguous, dynamic_casting, extra_args, return_by_ref);
}
// Entrypoint for dynamic version of jitted GPU kernels, which accepts dynamic number of inputs
// and arbitrary types of input and extra args. This dynamic version is needed for jiterator with python interface,
// since the kernel definition is unknown at the compilation time.
// Similarly, launch_jitted_vectorized_kernel_dynamic and launch_jitted_unrolled_kernel_dynamic are created
// to handle arbitrary functions defined in python user code.
// For templated version, see note [Jiterator] in JitLoops.cuh for more details
void jitted_gpu_kernel_dynamic(
const std::string& kernel_name,
TensorIteratorBase& iter,
const std::string& f,
const c10::SmallVector<at::Scalar>& extra_args,
bool return_by_ref) {
// TODO: much of preamble is common to both jitted_gpu_kernel and gpu_kernel
// Maybe it could be refactored?
for (int arg = 0; arg < iter.ntensors(); arg++) {
TORCH_INTERNAL_ASSERT(
iter.device(arg).is_cuda(),
"argument ", arg, ": expected a CUDA device but found ", iter.device(arg));
}
if (iter.numel() == 0) {
return;
}
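// The generated kernels index with 32-bit offsets, so an iterator that needs 64-bit indexing
// is split into 32-bit-addressable sub-iterators and each piece is dispatched recursively.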
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
jitted_gpu_kernel_dynamic(kernel_name, sub_iter, f, extra_args, return_by_ref);
}
return;
}
// Computes if dynamic casting is needed
// Dynamic casting is needed if an input's or output's dtype differs from the common dtype
bool needs_dynamic_casting = false;
const at::ScalarType common_dtype = iter.common_dtype();
for (auto i = 0; i < iter.ntensors(); ++i) {
if (iter.dtype(i) != common_dtype) {
needs_dynamic_casting = true;
break;
}
}
jitted_gpu_kernel_dynamic_impl(kernel_name, iter, f, needs_dynamic_casting, extra_args, return_by_ref);
}
} // namespace native
namespace cuda {
c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
const std::string& code_string,
const std::string& kernel_name,
const int num_outputs,
const c10::SmallVector<at::Tensor>& tensors,
const c10::SmallVector<at::Scalar>& extra_args,
bool return_by_ref) {
c10::SmallVector<at::Tensor> outs(num_outputs);
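// `outs` starts out as undefined tensors; once registered as owned outputs below, TensorIterator
// allocates them with the broadcast shape and the promoted common dtype.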
TensorIteratorConfig config;
config
.set_check_mem_overlap(true)
.allow_cpu_scalars(false)
.promote_inputs_to_common_dtype(true)
.cast_common_dtype_to_outputs(true)
.enforce_safe_casting_to_output(true)
.check_all_same_device(true);
for (int i = 0; i < num_outputs; ++i) {
config.add_owned_output(outs[i]);
}
for (const auto& t: tensors) {
config.add_input(t);
}
TensorIterator iter = config.build();
HIPGuardMasqueradingAsCUDA guard(iter.device());
at::native::jitted_gpu_kernel_dynamic(kernel_name, iter, code_string, extra_args, return_by_ref);
c10::SmallVector<at::Tensor> outputs;
for (int i = 0; i < num_outputs; ++i) {
outputs.emplace_back(iter.output(i));
}
return outputs;
}
}} // namespace at::cuda
#endif // AT_USE_JITERATOR()
| 2d3b0f6f0f6767c8dc6732247f3b84e42daceb81.cu | #include <ATen/jit_macros.h>
#if AT_USE_JITERATOR()
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/jiterator.h>
#include <ATen/cuda/jiterator_impl.h>
#include <iostream>
#include <utility>
#include <chrono>
namespace at {
namespace native {
static inline void launch_jitted_vectorized_kernel_dynamic(
const std::string& name, TensorIteratorBase& iter,
DeviceIndex dev_idx, int64_t N, const std::string& f, void* data_ptr,
const c10::SmallVector<at::Scalar>& extra_args, bool return_by_ref) {
TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
// N is still int64_t for the computation, but it's always safe to cast result to int
const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
const int vec_size = jitted_can_vectorize_up_to(iter);
bool vectorized = vec_size > 1;
// Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
// fn_ptr is set to the appropriate function based on the vec size and GPU used
// TODO: Memory use can probably be optimized by re-using kernels across GPUs with
// the same compute capability
int nInputs = iter.ninputs();
int nOutputs = iter.noutputs();
const at::ScalarType common_dtype = iter.common_dtype();
std::string f_inputs_type_str = at::cuda::jit::typeName(common_dtype);
std::string compute_type_str = at::cuda::jit::typeName(toOpMathType(common_dtype));
std::string result_type_str = at::cuda::jit::typeName(common_dtype);
c10::SmallVector<std::string> extra_args_types = get_extra_args_typenames(extra_args);
// The cache key includes all the parameters to generate_code + vec_size + dev_idx
std::stringstream ss;
ss << nInputs << "_" << nOutputs << f;
ss << f_inputs_type_str << compute_type_str << result_type_str;
ss << static_cast<int>(at::cuda::jit::BinaryFuncVariant::NoScalar);
ss << extra_args_types;
ss << vec_size;
// DeviceIndex, e.g. int8_t, is not treated as a number by the stream, cast to int as a workaround
ss << static_cast<int>(dev_idx);
const std::string cache_key = ss.str();
static std::mutex _jiterator_mutex;
static std::unordered_map<std::string, at::cuda::jit::NvrtcFunction> fns;
at::cuda::jit::NvrtcFunction* fn_ptr = &fns[cache_key];
if (!fn_ptr->function) {
const std::lock_guard<std::mutex> lock{_jiterator_mutex};
if (!fn_ptr->function) { // cache miss!
// Generates program
auto code = at::cuda::jit::generate_code(nInputs, nOutputs, f, name,
f_inputs_type_str, compute_type_str, result_type_str,
/*contiguous=*/true, /*dynamic_casting=*/false,
at::cuda::jit::BinaryFuncVariant::NoScalar,
extra_args_types,
vectorized, vec_size,
return_by_ref);
std::string kernel_name = vectorized ? name + "_vectorized" + std::to_string(vec_size) : name;
// Acquires the program
*fn_ptr = at::cuda::jit::jit_pwise_function(code, kernel_name);
}
}
// size of `extra_args` is unknown at compile-time
auto extra_args_size = extra_args.size();
float scalar_val = 0;
if (vectorized) {
// pack args for kernel launch
constexpr int kernel_args = 3;
auto args = std::make_unique<void*[]>(kernel_args + extra_args_size);
args[0] = static_cast<void*>(&N);
args[1] = data_ptr;
args[2] = static_cast<void*>(&scalar_val);
for (const auto i : c10::irange(extra_args_size)) {
// since 3 slots are already filled in `args`
args[i + 3] = const_cast<void*>(extra_args[i].data_ptr());
}
at::cuda::jit::launch_jitted_pwise_function(*fn_ptr, args.get(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
} else {
TrivialOffsetCalculatorVariant input_offset_calculator(iter.ninputs());
void* ic_ptr = input_offset_calculator.data_ptr();
TrivialOffsetCalculatorVariant output_offset_calculator(iter.noutputs());
void* oc_ptr = output_offset_calculator.data_ptr();
auto l = memory::LoadWithoutCast();
auto s = memory::StoreWithoutCast();
// pack args for kernel launch
constexpr int kernel_args = 7;
auto args = std::make_unique<void*[]>(kernel_args + extra_args_size);
args[0] = static_cast<void*>(&N);
args[1] = data_ptr;
args[2] = ic_ptr;
args[3] = oc_ptr;
args[4] = static_cast<void*>(&l);
args[5] = static_cast<void*>(&s);
args[6] = static_cast<void*>(&scalar_val);
for (const auto i : c10::irange(extra_args_size)) {
// since 7 slots are already filled in `args`
args[i + 7] = const_cast<void*>(extra_args[i].data_ptr());
}
at::cuda::jit::launch_jitted_pwise_function(*fn_ptr, args.get(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
}
}
static inline void launch_jitted_unrolled_kernel_dynamic(
const std::string& name, TensorIteratorBase& iter,
DeviceIndex dev_idx, int64_t N, const std::string& f, void* data_ptr,
void* ic_ptr, void* oc_ptr, void* l_ptr, void* s_ptr, bool contiguous, bool dynamic_casting,
const c10::SmallVector<at::Scalar>& extra_args, bool return_by_ref) {
TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
// casting the result to int is always safe: the intermediate is int64_t and won't overflow
const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
int nInputs = iter.ninputs();
int nOutputs = iter.noutputs();
const at::ScalarType common_dtype = iter.common_dtype();
std::string f_inputs_type_str = at::cuda::jit::typeName(common_dtype);
std::string compute_type_str = at::cuda::jit::typeName(toOpMathType(common_dtype));
std::string result_type_str = at::cuda::jit::typeName(common_dtype);
c10::SmallVector<std::string> extra_args_types = get_extra_args_typenames(extra_args);
// The cache key includes all the parameters to generate_code + dev_idx
std::stringstream ss;
ss << nInputs << "_" << nOutputs << f;
ss << f_inputs_type_str << compute_type_str << result_type_str;
ss << contiguous << dynamic_casting;
ss << static_cast<int>(at::cuda::jit::BinaryFuncVariant::NoScalar);
ss << extra_args_types;
ss << dev_idx;
const std::string cache_key = ss.str();
static std::mutex _jiterator_mutex;
static std::unordered_map<std::string, at::cuda::jit::NvrtcFunction> fns;
at::cuda::jit::NvrtcFunction* fn_ptr = &fns[cache_key];
if (!fn_ptr->function) {
const std::lock_guard<std::mutex> lock{_jiterator_mutex};
if (!fn_ptr->function) {
auto code = at::cuda::jit::generate_code(nInputs, nOutputs, f, name,
f_inputs_type_str, compute_type_str, result_type_str,
contiguous, dynamic_casting,
at::cuda::jit::BinaryFuncVariant::NoScalar,
extra_args_types, /*vectorized*/false, /*vec_size*/0, return_by_ref);
*fn_ptr = at::cuda::jit::jit_pwise_function(code, name);
}
}
float scalar_val = 0;
// pack args for kernel launch
constexpr int kernel_args = 7;
auto extra_args_size = extra_args.size();
auto args = std::make_unique<void*[]>(kernel_args + extra_args_size);
args[0] = static_cast<void*>(&N);
args[1] = data_ptr;
args[2] = ic_ptr;
args[3] = oc_ptr;
args[4] = l_ptr;
args[5] = s_ptr;
args[6] = static_cast<void*>(&scalar_val);
for (const auto i : c10::irange(extra_args_size)) {
// since 7 slots are already filled in `args`
args[i + 7] = const_cast<void*>(extra_args[i].data_ptr());
}
at::cuda::jit::launch_jitted_pwise_function(*fn_ptr, args.get(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
}
void jitted_gpu_kernel_dynamic_impl(
const std::string& kernel_name,
TensorIteratorBase& iter,
const std::string& f,
const bool dynamic_casting,
const c10::SmallVector<at::Scalar>& extra_args,
bool return_by_ref) {
TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
TORCH_INTERNAL_ASSERT(iter.noutputs() <= 8);
TORCH_INTERNAL_ASSERT(iter.ninputs() <= 8);
ArrayVariant data(iter);
void* data_ptr = data.data_ptr();
int64_t numel = iter.numel();
bool contiguous = iter.is_contiguous();
// Decides which of 4 kernel types to launch
// Variations are:
// - Case 1: no dynamic casting and contiguous
// - Case 2: no dynamic casting and noncontiguous
// - Case 3: dynamic casting and contiguous
// - Case 4: dynamic casting and noncontiguous
// These cases align with the non-jitted CUDALoops.cuh cases in gpu_kernel_impl
if (!dynamic_casting) {
if (contiguous) {
// Case 1: no dynamic casting and contiguous
launch_jitted_vectorized_kernel_dynamic(kernel_name, iter,
iter.device().index(), numel, f, data_ptr, extra_args, return_by_ref);
return;
}
// Case 2: no dynamic casting and noncontiguous
OffsetCalculatorVariant</*is_input=*/true> input_offset_calculator(iter);
void* ic_ptr = input_offset_calculator.data_ptr();
OffsetCalculatorVariant</*is_input=*/false> output_offset_calculator(iter);
void* oc_ptr = output_offset_calculator.data_ptr();
auto loader = memory::LoadWithoutCast();
auto storer = memory::StoreWithoutCast();
void* l_ptr = static_cast<void*>(&loader);
void* s_ptr = static_cast<void*>(&storer);
launch_jitted_unrolled_kernel_dynamic(
kernel_name, iter, iter.device().index(), numel, f, data_ptr,
ic_ptr, oc_ptr, l_ptr, s_ptr, contiguous, dynamic_casting, extra_args, return_by_ref);
return;
}
// Cases 3 and 4 are handled below
// Both require construction of one or more storers and loaders
// Creates load casts from inputs (note offset indexing into the iterator's noutputs...n tensors)
LoadWithCastVariant loader(iter);
void* l_ptr = loader.data_ptr();
// Creates store casts to outputs (the 0...noutputs-1 tensors in the TensorIterator)
StoreWithCastVariant storer(iter);
void* s_ptr = storer.data_ptr();
if (contiguous) {
// Case 3: dynamic casting and contiguous
TrivialOffsetCalculatorVariant input_offset_calculator(iter.ninputs());
void* ic_ptr = input_offset_calculator.data_ptr();
TrivialOffsetCalculatorVariant output_offset_calculator(iter.noutputs());
void* oc_ptr = output_offset_calculator.data_ptr();
launch_jitted_unrolled_kernel_dynamic(
kernel_name, iter, iter.device().index(), numel, f, data_ptr,
ic_ptr, oc_ptr, l_ptr, s_ptr, contiguous, dynamic_casting, extra_args, return_by_ref);
return;
}
// Case 4: dynamic casting and noncontiguous
OffsetCalculatorVariant</*is_input=*/true> input_offset_calculator(iter);
void* ic_ptr = input_offset_calculator.data_ptr();
OffsetCalculatorVariant</*is_input=*/false> output_offset_calculator(iter);
void* oc_ptr = output_offset_calculator.data_ptr();
launch_jitted_unrolled_kernel_dynamic(
kernel_name, iter, iter.device().index(), numel, f, data_ptr,
ic_ptr, oc_ptr, l_ptr, s_ptr, contiguous, dynamic_casting, extra_args, return_by_ref);
}
// Entrypoint for the dynamic version of jitted GPU kernels, which accepts a dynamic number of inputs
// and arbitrary types of inputs and extra args. This dynamic version is needed for the jiterator's Python
// interface, since the kernel definition is not known at compile time.
// Similarly, launch_jitted_vectorized_kernel_dynamic and launch_jitted_unrolled_kernel_dynamic are created
// to handle arbitrary functions defined in Python user code.
// For the templated version, see note [Jiterator] in JitLoops.cuh for more details.
void jitted_gpu_kernel_dynamic(
const std::string& kernel_name,
TensorIteratorBase& iter,
const std::string& f,
const c10::SmallVector<at::Scalar>& extra_args,
bool return_by_ref) {
// TODO: much of this preamble is common to both jitted_gpu_kernel and gpu_kernel
// Maybe it could be refactored?
for (int arg = 0; arg < iter.ntensors(); arg++) {
TORCH_INTERNAL_ASSERT(
iter.device(arg).is_cuda(),
"argument ", arg, ": expected a CUDA device but found ", iter.device(arg));
}
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
jitted_gpu_kernel_dynamic(kernel_name, sub_iter, f, extra_args, return_by_ref);
}
return;
}
// Computes if dynamic casting is needed
// Dynamic casting is needed if an input's or output's dtype differs from the common dtype
bool needs_dynamic_casting = false;
const at::ScalarType common_dtype = iter.common_dtype();
for (auto i = 0; i < iter.ntensors(); ++i) {
if (iter.dtype(i) != common_dtype) {
needs_dynamic_casting = true;
break;
}
}
jitted_gpu_kernel_dynamic_impl(kernel_name, iter, f, needs_dynamic_casting, extra_args, return_by_ref);
}
} // namespace native
namespace cuda {
c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
const std::string& code_string,
const std::string& kernel_name,
const int num_outputs,
const c10::SmallVector<at::Tensor>& tensors,
const c10::SmallVector<at::Scalar>& extra_args,
bool return_by_ref) {
c10::SmallVector<at::Tensor> outs(num_outputs);
TensorIteratorConfig config;
config
.set_check_mem_overlap(true)
.allow_cpu_scalars(false)
.promote_inputs_to_common_dtype(true)
.cast_common_dtype_to_outputs(true)
.enforce_safe_casting_to_output(true)
.check_all_same_device(true);
for (int i = 0; i < num_outputs; ++i) {
config.add_owned_output(outs[i]);
}
for (const auto& t: tensors) {
config.add_input(t);
}
TensorIterator iter = config.build();
CUDAGuard guard(iter.device());
at::native::jitted_gpu_kernel_dynamic(kernel_name, iter, code_string, extra_args, return_by_ref);
c10::SmallVector<at::Tensor> outputs;
for (int i = 0; i < num_outputs; ++i) {
outputs.emplace_back(iter.output(i));
}
return outputs;
}
}} // namespace at::cuda
#endif // AT_USE_JITERATOR()
929ce9074925f3ba46f3ba71f871da4fb90bb6a2.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include "../include/ceed.h"
#include <hip/hip_runtime.h>
__global__ void setValueK(CeedScalar * __restrict__ vec, CeedInt size, CeedScalar val) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= size)
return;
vec[idx] = val;
}
extern "C" int CeedDeviceSetValue(CeedScalar* d_array, CeedInt length, CeedScalar val) {
const int bsize = 512;
const int vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
hipLaunchKernelGGL(( setValueK), dim3(gridsize),dim3(bsize), 0, 0, d_array, length, val);
return 0;
}
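// ---------------------------------------------------------------------------
// Minimal usage sketch (not from the CEED sources): exercising the helper above
// on its own. The buffer length and fill value are arbitrary; error handling is
// reduced to a single check for brevity.
// ---------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include "../include/ceed.h" // for CeedScalar / CeedInt, as in the file above
extern "C" int CeedDeviceSetValue(CeedScalar* d_array, CeedInt length, CeedScalar val);
static int exampleSetValue() {
  const CeedInt n = 1000;
  CeedScalar* d_array = nullptr;
  if (hipMalloc(&d_array, n * sizeof(CeedScalar)) != hipSuccess)
    return 1;
  CeedDeviceSetValue(d_array, n, (CeedScalar)1.0); // fill all n entries with 1.0
  hipDeviceSynchronize(); // the launch inside CeedDeviceSetValue is asynchronous
  hipFree(d_array);
  return 0;
}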
929ce9074925f3ba46f3ba71f871da4fb90bb6a2.cu
// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include "../include/ceed.h"
#include <cuda.h>
__global__ void setValueK(CeedScalar * __restrict__ vec, CeedInt size, CeedScalar val) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= size)
return;
vec[idx] = val;
}
extern "C" int CeedDeviceSetValue(CeedScalar* d_array, CeedInt length, CeedScalar val) {
const int bsize = 512;
const int vecsize = length;
int gridsize = vecsize / bsize;
if (bsize * gridsize < vecsize)
gridsize += 1;
setValueK<<<gridsize,bsize>>>(d_array, length, val);
return 0;
}
0327e92a45bc29ad5d6e121ddc6199e5759d26ec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_calc_dt_kernel_min;
int xdim0_calc_dt_kernel_min_h = -1;
__constant__ int ydim0_calc_dt_kernel_min;
int ydim0_calc_dt_kernel_min_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x, y, z) \
(x + xdim0_calc_dt_kernel_min * (y) + \
xdim0_calc_dt_kernel_min * ydim0_calc_dt_kernel_min * (z))
// user function
__device__
void
calc_dt_kernel_min(const double *dt_min, double *dt_min_val) {
*dt_min_val = MIN(*dt_min_val, dt_min[OPS_ACC0(0, 0, 0)]);
}
#undef OPS_ACC0
__global__ void ops_calc_dt_kernel_min(const double *__restrict arg0,
double *__restrict arg1, int size0,
int size1, int size2) {
double arg1_l[1];
for (int d = 0; d < 1; d++)
arg1_l[d] = INFINITY_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_calc_dt_kernel_min +
idx_z * 1 * 1 * xdim0_calc_dt_kernel_min * ydim0_calc_dt_kernel_min;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
calc_dt_kernel_min(arg0, arg1_l);
}
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_MIN>(&arg1[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg1_l[d]);
}
// host stub function
void ops_par_loop_calc_dt_kernel_min(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 38))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(38, "calc_dt_kernel_min");
OPS_kernels[38].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
if (xdim0 != xdim0_calc_dt_kernel_min_h ||
ydim0 != ydim0_calc_dt_kernel_min_h) {
hipMemcpyToSymbol(xdim0_calc_dt_kernel_min, &xdim0, sizeof(int));
xdim0_calc_dt_kernel_min_h = xdim0;
hipMemcpyToSymbol(ydim0_calc_dt_kernel_min, &ydim0, sizeof(int));
ydim0_calc_dt_kernel_min_h = ydim0;
}
#ifdef OPS_MPI
double *arg1h =
(double *)(((ops_reduction)args[1].data)->data +
((ops_reduction)args[1].data)->size * block->index);
#else
double *arg1h = (double *)(((ops_reduction)args[1].data)->data);
#endif
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int nblocks = ((x_size - 1) / OPS_block_size_x + 1) *
((y_size - 1) / OPS_block_size_y + 1) * z_size;
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OPS_reduct_h + reduct_bytes;
arg1.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg1.data)[d + b * 1] = INFINITY_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
}
int nshared = 0;
int nthread = OPS_block_size_x * OPS_block_size_y;
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared * nthread, reduct_size * nthread);
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_calc_dt_kernel_min), dim3(grid), dim3(tblock), nshared, 0,
(double *)p_a[0], (double *)arg1.data_d, x_size, y_size, z_size);
mvReductArraysToHost(reduct_bytes);
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg1h[d] = MIN(arg1h[d], ((double *)arg1.data)[d + b * 1]);
}
}
arg1.data = (char *)arg1h;
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[38].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
0327e92a45bc29ad5d6e121ddc6199e5759d26ec.cu
//
// auto-generated by ops.py
//
__constant__ int xdim0_calc_dt_kernel_min;
int xdim0_calc_dt_kernel_min_h = -1;
__constant__ int ydim0_calc_dt_kernel_min;
int ydim0_calc_dt_kernel_min_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x, y, z) \
(x + xdim0_calc_dt_kernel_min * (y) + \
xdim0_calc_dt_kernel_min * ydim0_calc_dt_kernel_min * (z))
// user function
__device__
void
calc_dt_kernel_min(const double *dt_min, double *dt_min_val) {
*dt_min_val = MIN(*dt_min_val, dt_min[OPS_ACC0(0, 0, 0)]);
}
#undef OPS_ACC0
__global__ void ops_calc_dt_kernel_min(const double *__restrict arg0,
double *__restrict arg1, int size0,
int size1, int size2) {
double arg1_l[1];
for (int d = 0; d < 1; d++)
arg1_l[d] = INFINITY_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_calc_dt_kernel_min +
idx_z * 1 * 1 * xdim0_calc_dt_kernel_min * ydim0_calc_dt_kernel_min;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
calc_dt_kernel_min(arg0, arg1_l);
}
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_MIN>(&arg1[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg1_l[d]);
}
// host stub function
void ops_par_loop_calc_dt_kernel_min(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 38))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(38, "calc_dt_kernel_min");
OPS_kernels[38].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
if (xdim0 != xdim0_calc_dt_kernel_min_h ||
ydim0 != ydim0_calc_dt_kernel_min_h) {
cudaMemcpyToSymbol(xdim0_calc_dt_kernel_min, &xdim0, sizeof(int));
xdim0_calc_dt_kernel_min_h = xdim0;
cudaMemcpyToSymbol(ydim0_calc_dt_kernel_min, &ydim0, sizeof(int));
ydim0_calc_dt_kernel_min_h = ydim0;
}
#ifdef OPS_MPI
double *arg1h =
(double *)(((ops_reduction)args[1].data)->data +
((ops_reduction)args[1].data)->size * block->index);
#else
double *arg1h = (double *)(((ops_reduction)args[1].data)->data);
#endif
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int nblocks = ((x_size - 1) / OPS_block_size_x + 1) *
((y_size - 1) / OPS_block_size_y + 1) * z_size;
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OPS_reduct_h + reduct_bytes;
arg1.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg1.data)[d + b * 1] = INFINITY_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
}
int nshared = 0;
int nthread = OPS_block_size_x * OPS_block_size_y;
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared * nthread, reduct_size * nthread);
// call kernel wrapper function, passing in pointers to data
ops_calc_dt_kernel_min<<<grid, tblock, nshared>>>(
(double *)p_a[0], (double *)arg1.data_d, x_size, y_size, z_size);
mvReductArraysToHost(reduct_bytes);
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg1h[d] = MIN(arg1h[d], ((double *)arg1.data)[d + b * 1]);
}
}
arg1.data = (char *)arg1h;
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[38].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
3e139ebaf2edf84950f23a5f468cc245f31f43d2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (long) BLOCK_SIZE), MAX_NUM_BLOCKS)
__global__ void rreluUpdateOutputTrain(int n, hiprandStateMtgp32_t *state,
float *input, float* noise, float *output, double a, double b)
{
CUDA_KERNEL_LOOP(i, n)
{
if (input[i] <= 0)
{
float r = hiprand_uniform(&state[blockIdx.x]);
r = r * (b-a) + a;
output[i] = input[i] * r;
noise[i] = r;
}
else
{
output[i] = input[i];
noise[i] = 1;
}
}
}
struct RReLUUpdateOutputEval_functor
{
const float negSlope_;
RReLUUpdateOutputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *out, float *in)
{
const float x = *in;
const float r = x <= 0 ? negSlope_ : 1;
*out = x * r;
}
};
struct RReLUUpdateOutputEvalIP_functor
{
const float negSlope_;
RReLUUpdateOutputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *x)
{
if (*x <= 0)
{
*x = *x * negSlope_;
}
}
};
void THNN_CudaRReLU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
THCudaTensor *noise, double lower, double upper, bool train, bool inplace, void *generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
if (state->rngState->current_gen == NULL)
{
THError("Random number generators have not been initialized.");
}
if (train)
{
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, noise, input);
float *input_data = THCudaTensor_data(state, input);
float *noise_data = THCudaTensor_data(state, noise);
long n = THCudaTensor_nElement(state, input);
if (inplace)
{
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
n, state->rngState->current_gen->gen_states,
input_data, noise_data, input_data, lower, upper);
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
float *output_data = THCudaTensor_data(state, output);
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
n, state->rngState->current_gen->gen_states,
input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(hipGetLastError());
THCudaTensor_free(state, input);
}
else
{
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THCudaTensor_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope));
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, RReLUUpdateOutputEval_functor(negSlope));
}
}
}
struct RReLUupdateGradInputEval_functor
{
const float negSlope_;
RReLUupdateGradInputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradIn, float *gradOut, float *in)
{
*gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
}
};
struct RReLUupdateGradInputEvalIP_functor
{
const float negSlope_;
RReLUupdateGradInputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradOut, float *in)
{
if (*in <= 0)
{
*gradOut = (*gradOut) * negSlope_;
}
}
};
void THNN_CudaRReLU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *noise, double lower, double upper, bool train, bool inplace)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCudaTensor_cmul(state, gradOutput, gradOutput, noise);
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_cmul(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THCudaTensor_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope));
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope));
}
}
THCudaTensor_free(state, gradOutput);
}
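// ---------------------------------------------------------------------------
// Summary sketch (not from the THCUNN sources): per element, the kernels above
// implement, with a = lower and b = upper,
//   train forward : y = x,     noise = 1              if x > 0
//                   y = r * x, noise = r, r ~ U(a, b) if x <= 0
//   train backward: gradInput = gradOutput * noise    (when b - a > 1e-6)
//   eval (both)   : the random r is replaced by the fixed negSlope = (a + b) / 2
// ---------------------------------------------------------------------------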
3e139ebaf2edf84950f23a5f468cc245f31f43d2.cu
#include "THCUNN.h"
#include "common.h"
#include <curand.h>
#include <curand_kernel.h>
// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (long) BLOCK_SIZE), MAX_NUM_BLOCKS)
__global__ void rreluUpdateOutputTrain(int n, curandStateMtgp32 *state,
float *input, float* noise, float *output, double a, double b)
{
CUDA_KERNEL_LOOP(i, n)
{
if (input[i] <= 0)
{
float r = curand_uniform(&state[blockIdx.x]);
r = r * (b-a) + a;
output[i] = input[i] * r;
noise[i] = r;
}
else
{
output[i] = input[i];
noise[i] = 1;
}
}
}
struct RReLUUpdateOutputEval_functor
{
const float negSlope_;
RReLUUpdateOutputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *out, float *in)
{
const float x = *in;
const float r = x <= 0 ? negSlope_ : 1;
*out = x * r;
}
};
struct RReLUUpdateOutputEvalIP_functor
{
const float negSlope_;
RReLUUpdateOutputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *x)
{
if (*x <= 0)
{
*x = *x * negSlope_;
}
}
};
void THNN_CudaRReLU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
THCudaTensor *noise, double lower, double upper, bool train, bool inplace, void *generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
if (state->rngState->current_gen == NULL)
{
THError("Random number generators have not been initialized.");
}
if (train)
{
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, noise, input);
float *input_data = THCudaTensor_data(state, input);
float *noise_data = THCudaTensor_data(state, noise);
long n = THCudaTensor_nElement(state, input);
if (inplace)
{
rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
n, state->rngState->current_gen->gen_states,
input_data, noise_data, input_data, lower, upper);
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
float *output_data = THCudaTensor_data(state, output);
rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
n, state->rngState->current_gen->gen_states,
input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(cudaGetLastError());
THCudaTensor_free(state, input);
}
else
{
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THCudaTensor_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope));
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, RReLUUpdateOutputEval_functor(negSlope));
}
}
}
struct RReLUupdateGradInputEval_functor
{
const float negSlope_;
RReLUupdateGradInputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradIn, float *gradOut, float *in)
{
*gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
}
};
struct RReLUupdateGradInputEvalIP_functor
{
const float negSlope_;
RReLUupdateGradInputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradOut, float *in)
{
if (*in <= 0)
{
*gradOut = (*gradOut) * negSlope_;
}
}
};
void THNN_CudaRReLU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *noise, double lower, double upper, bool train, bool inplace)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCudaTensor_cmul(state, gradOutput, gradOutput, noise);
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_cmul(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THCudaTensor_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope));
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope));
}
}
THCudaTensor_free(state, gradOutput);
}
6177998cee859fb5fa87f972cece70fa25b89060.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <assert.h>
#define MAX_L 1024
#define MAX_S 64
__device__ static void _load(int* tgt, const int* src, int L, int l, int s);
__global__ static void _cuda_make_obs(const float* feats, int rows, int cols, int stride, const int* lab, float *data, int d_stride, int S);
__global__ static void _cuda_make_tran(int rows, int cols, const int* lab, float *data, int d_stride, int S);
__global__ static void _cuda_make_obs(const float* feats, int rows, int cols, int stride, const int* lab, int l, float *data, int d_stride, int S);
__global__ static void _cuda_make_tran(int rows, int cols, const int* lab, int l, float *data, int d_stride, int S);
__global__ static void _cuda_find_max(const float* arr, int len, float *val, int* index);
void cuda_make_obs(dim3 grid, dim3 block, const float* feats, int rows, int cols, int stride, const int* lab, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
hipLaunchKernelGGL(( _cuda_make_obs), dim3(grid), dim3(block), 0, 0, feats, rows, cols, stride, lab, data, d_stride, S);
}
void cuda_make_tran(dim3 grid, dim3 block, int rows, int cols, const int* lab, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
hipLaunchKernelGGL(( _cuda_make_tran), dim3(grid), dim3(block), 0, 0, rows, cols, lab, data, d_stride, S);
}
void cuda_find_max(dim3 grid, dim3 block, int sharemem, const float* arr, int len, float *val, int* index){
hipLaunchKernelGGL(( _cuda_find_max), dim3(grid), dim3(block), sharemem, 0, arr, len, val, index);
}
void cuda_make_obs(dim3 grid, dim3 block, const float* feats, int rows, int cols, int stride, const int* lab, int l, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
hipLaunchKernelGGL(( _cuda_make_obs), dim3(grid), dim3(block), 0, 0, feats, rows, cols, stride, lab, l, data, d_stride, S);
}
void cuda_make_tran(dim3 grid, dim3 block, int rows, int cols, const int* lab, int l, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
hipLaunchKernelGGL(( _cuda_make_tran), dim3(grid), dim3(block), 0, 0, rows, cols, lab, l, data, d_stride, S);
}
__device__
static void _load(int* tgt, const int* src, int L, int l, int s){
for(int i = 0; i < L; i++)
tgt[i] = src[i];
tgt[l] = s;
}
__global__
static void _cuda_make_obs(const float* feats, int rows, int cols, int stride,
const int* lab, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int s = idx % S; idx /= S;
int l = idx % L; idx /= L;
int f = idx;
if(f >= F) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float tmparr[MAX_S];
for(int i = 0; i < S; ++i)
tmparr[i] = 0;
for(int i = 0; i < L; ++i){
tmparr[mylab[i]] += feats[i * stride + f];
}
float *tgt = data + (l*S + s)*d_stride;
for(int i = 0; i < S; ++i){
tgt[i*F + f] = tmparr[i]/L;
}
}
__global__
static void _cuda_make_tran(int rows, int cols, const int* lab, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int l = idx % L; idx /= L;
int s = idx;
if( s >= S ) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float *tgt = data + (l*S + s)*d_stride + S*F;
float one = 1/(float)L;
for(int i = 1; i < L; ++i)
tgt[mylab[i - 1]*S + mylab[i]] += one;
}
__global__
static void _cuda_make_obs(const float* feats, int rows, int cols, int stride,
const int* lab, int l, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int s = idx % S; idx /= S;
int f = idx;
if(f >= F) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float tmparr[MAX_S];
for(int i = 0; i < S; ++i)
tmparr[i] = 0;
for(int i = 0; i < L; ++i){
tmparr[mylab[i]] += feats[i * stride + f];
}
float *tgt = data + s*d_stride;
for(int i = 0; i < S; ++i){
tgt[i*F + f] = tmparr[i]/L;
}
}
__global__
static void _cuda_make_tran(int rows, int cols, const int* lab, int l, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int s = idx;
if( s >= S ) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float *tgt = data + s*d_stride + S*F;
float one = 1/(float)L;
for(int i = 1; i < L; ++i)
tgt[mylab[i - 1]*S + mylab[i]] += one;
}
__global__
static void _cuda_find_max(const float* arr, int len, float *val, int* index){
extern __shared__ float share[];
float* sdata = (float*)share;
int* idx = (int* )&sdata[blockDim.x];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < len){
sdata[tid] = arr[i];
idx[tid] = i;
}else{
sdata[tid] = 0;
idx[tid] = -1;
}
__syncthreads();
//if(i >= len) return;
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
if(sdata[tid] < sdata[tid + s]){
sdata[tid] = sdata[tid + s];
idx[tid] = idx[tid + s];
}
}
__syncthreads();
}
if(tid == 0){
val[blockIdx.x] = sdata[0];
index[blockIdx.x] = idx[0];
}
}
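// ---------------------------------------------------------------------------
// Launch sketch (not from the original sources): how cuda_find_max above might
// be invoked from the host. The dynamic shared-memory size must cover
// blockDim.x floats (values) plus blockDim.x ints (indices), and the kernel
// writes one (max, argmax) pair per block, so d_blockVal / d_blockIdx need at
// least `blocks` entries and still require a final reduction pass. The buffer
// names and block size are hypothetical; the block size should stay a power of
// two because of the s >>= 1 reduction loop.
// ---------------------------------------------------------------------------
static void exampleFindMax(const float* d_arr, int len, float* d_blockVal, int* d_blockIdx) {
  const int threads = 256;
  const int blocks = (len + threads - 1) / threads;
  const int sharemem = threads * (sizeof(float) + sizeof(int));
  cuda_find_max(dim3(blocks), dim3(threads), sharemem, d_arr, len, d_blockVal, d_blockIdx);
}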
6177998cee859fb5fa87f972cece70fa25b89060.cu
#include "kernel.h"
#include <assert.h>
#define MAX_L 1024
#define MAX_S 64
__device__ static void _load(int* tgt, const int* src, int L, int l, int s);
__global__ static void _cuda_make_obs(const float* feats, int rows, int cols, int stride, const int* lab, float *data, int d_stride, int S);
__global__ static void _cuda_make_tran(int rows, int cols, const int* lab, float *data, int d_stride, int S);
__global__ static void _cuda_make_obs(const float* feats, int rows, int cols, int stride, const int* lab, int l, float *data, int d_stride, int S);
__global__ static void _cuda_make_tran(int rows, int cols, const int* lab, int l, float *data, int d_stride, int S);
__global__ static void _cuda_find_max(const float* arr, int len, float *val, int* index);
void cuda_make_obs(dim3 grid, dim3 block, const float* feats, int rows, int cols, int stride, const int* lab, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
_cuda_make_obs<<<grid, block>>>(feats, rows, cols, stride, lab, data, d_stride, S);
}
void cuda_make_tran(dim3 grid, dim3 block, int rows, int cols, const int* lab, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
_cuda_make_tran<<<grid, block>>>(rows, cols, lab, data, d_stride, S);
}
void cuda_find_max(dim3 grid, dim3 block, int sharemem, const float* arr, int len, float *val, int* index){
_cuda_find_max<<<grid, block, sharemem>>>(arr, len, val, index);
}
void cuda_make_obs(dim3 grid, dim3 block, const float* feats, int rows, int cols, int stride, const int* lab, int l, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
_cuda_make_obs<<<grid, block>>>(feats, rows, cols, stride, lab, l, data, d_stride, S);
}
void cuda_make_tran(dim3 grid, dim3 block, int rows, int cols, const int* lab, int l, float *data, int d_stride, int S){
assert( rows < MAX_L);
assert( S < MAX_S);
_cuda_make_tran<<<grid, block>>>(rows, cols, lab, l, data, d_stride, S);
}
__device__
static void _load(int* tgt, const int* src, int L, int l, int s){
for(int i = 0; i < L; i++)
tgt[i] = src[i];
tgt[l] = s;
}
__global__
static void _cuda_make_obs(const float* feats, int rows, int cols, int stride,
const int* lab, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int s = idx % S; idx /= S;
int l = idx % L; idx /= L;
int f = idx;
if(f >= F) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float tmparr[MAX_S];
for(int i = 0; i < S; ++i)
tmparr[i] = 0;
for(int i = 0; i < L; ++i){
tmparr[mylab[i]] += feats[i * stride + f];
}
float *tgt = data + (l*S + s)*d_stride;
for(int i = 0; i < S; ++i){
tgt[i*F + f] = tmparr[i]/L;
}
}
__global__
static void _cuda_make_tran(int rows, int cols, const int* lab, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int l = idx % L; idx /= L;
int s = idx;
if( s >= S ) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float *tgt = data + (l*S + s)*d_stride + S*F;
float one = 1/(float)L;
for(int i = 1; i < L; ++i)
tgt[mylab[i - 1]*S + mylab[i]] += one;
}
__global__
static void _cuda_make_obs(const float* feats, int rows, int cols, int stride,
const int* lab, int l, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int s = idx % S; idx /= S;
int f = idx;
if(f >= F) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float tmparr[MAX_S];
for(int i = 0; i < S; ++i)
tmparr[i] = 0;
for(int i = 0; i < L; ++i){
tmparr[mylab[i]] += feats[i * stride + f];
}
float *tgt = data + s*d_stride;
for(int i = 0; i < S; ++i){
tgt[i*F + f] = tmparr[i]/L;
}
}
__global__
static void _cuda_make_tran(int rows, int cols, const int* lab, int l, float *data, int d_stride, int S){
int L = rows;
int F = cols;
// change lab[l] = s, and feats[c]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int s = idx;
if( s >= S ) return;
// TODO
// load label into shared memory
// load feats
int mylab[MAX_L];
_load(mylab, lab, L, l, s);
float *tgt = data + s*d_stride + S*F;
float one = 1/(float)L;
for(int i = 1; i < L; ++i)
tgt[mylab[i - 1]*S + mylab[i]] += one;
}
__global__
static void _cuda_find_max(const float* arr, int len, float *val, int* index){
extern __shared__ float share[];
float* sdata = (float*)share;
int* idx = (int* )&sdata[blockDim.x];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < len){
sdata[tid] = arr[i];
idx[tid] = i;
}else{
sdata[tid] = 0;
idx[tid] = -1;
}
__syncthreads();
//if(i >= len) return;
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
if(sdata[tid] < sdata[tid + s]){
sdata[tid] = sdata[tid + s];
idx[tid] = idx[tid + s];
}
}
__syncthreads();
}
if(tid == 0){
val[blockIdx.x] = sdata[0];
index[blockIdx.x] = idx[0];
}
}
7b920d7382f19799b1b61f05cae26cec26bdfb08.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
7b920d7382f19799b1b61f05cae26cec26bdfb08.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
72eec33ba7c44d4eed20007dddc32905d0f948bd.hip
// !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include <hip/hip_runtime_api.h>
#include <algorithm>
#include <vector>
#include <chrono>
#include <thread>
/// Memory-bound dummy kernel. Do not edit.
__global__ void fastKernel(const double *a, double *b, int M) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= M)
return;
b[idx] = 10.0 * a[idx];
}
/// Compute-bound dummy kernel. Do not edit.
__global__ void slowKernel(const double *a, double *b, int M) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= M)
return;
double x = a[idx];
for (int i = 0; i < 10000; ++i)
x *= 1.01;
b[idx] = (x != 0.1231 ? 10.0 : -1.0) * a[idx];
}
/// Check whether `bHost` contains the correct result. Do not edit.
void checkResults(const double *bHost, int N) {
for (int i = 0; i < N; ++i) {
if (bHost[i] != 100.0 * i) {
printf("Incorrect value for i=%d: value before kernel=%.1f "
"expected after=%.1f now=%.1f\n",
i, 10.0 * i, 100. * i, bHost[i]);
exit(1);
}
}
}
hipStream_t streams[8];
/// Asynchronously, and in chunks, copy the array to the device, execute the
/// kernel and copy the result back.
template <typename Kernel>
void runAsync(const char *kernelName, Kernel kernel, int N, int chunkSize, int numStreams) {
double *aHost;
double *bHost;
CUDA_CHECK(hipHostMalloc(&aHost, N * sizeof(double)));
CUDA_CHECK(hipHostMalloc(&bHost, N * sizeof(double)));
for (int i = 0; i < N; ++i)
aHost[i] = 10.0 * i;
// Allocate chunks and create streams.
std::vector<double *> aDev(numStreams);
std::vector<double *> bDev(numStreams);
// std::vector<hipStream_t> streams(numStreams);
for (int i = 0; i < numStreams; ++i) {
CUDA_CHECK(hipMalloc(&aDev[i], chunkSize * sizeof(double)));
CUDA_CHECK(hipMalloc(&bDev[i], chunkSize * sizeof(double)));
}
// Instead of benchmark() we use a simplified measure() which invokes the
// function only once (to get cleaner profiling information).
double dt = measure([&]() {
// Handle chunk by chunk.
for (int chunk = 0; chunk * chunkSize < N; ++chunk) {
int offset = chunk * chunkSize;
int size = ::min(chunkSize, N - offset);
int stream = chunk % numStreams;
// Host -> device.
CUDA_CHECK(hipMemcpyAsync(aDev[stream], aHost + offset, size * sizeof(double),
hipMemcpyHostToDevice, streams[stream]));
// Kernel.
int threads = 1024;
int blocks = (size + threads - 1) / threads;
CUDA_LAUNCH_EX(kernel, blocks, threads, 0, streams[stream],
aDev[stream], bDev[stream], size);
// Device -> host.
CUDA_CHECK(hipMemcpyAsync(bHost + offset, bDev[stream], size * sizeof(double),
hipMemcpyDeviceToHost, streams[stream]));
}
// Synchronize. This MUST be within the lambda for the time measurement to work.
for (int i = 0; i < numStreams; ++i)
CUDA_CHECK(hipStreamSynchronize(streams[i]));
});
checkResults(bHost, N);
printf("async %s N=%9d chunkSize=%9d numStreams=%d time=%fs\n",
kernelName, N, chunkSize, numStreams, dt);
// Destroy streams and deallocate the chunks.
for (int i = numStreams - 1; i >= 0; --i) {
CUDA_CHECK(hipFree(bDev[i]));
CUDA_CHECK(hipFree(aDev[i]));
}
CUDA_CHECK(hipHostFree(bHost));
CUDA_CHECK(hipHostFree(aHost));
}
/// Synchronously copy the whole array to the device, execute the kernel and
/// copy the result back. Do not edit.
template <typename Kernel>
void runSync(const char *kernelName, Kernel kernel, int N) {
double *aHost;
double *bHost;
double *aDev;
double *bDev;
CUDA_CHECK(hipHostMalloc(&aHost, N * sizeof(double)));
CUDA_CHECK(hipHostMalloc(&bHost, N * sizeof(double)));
CUDA_CHECK(hipMalloc(&aDev, N * sizeof(double)));
CUDA_CHECK(hipMalloc(&bDev, N * sizeof(double)));
for (int i = 0; i < N; ++i)
aHost[i] = 10.0 * i;
// Host -> device.
double dt1 = measure([&]() {
CUDA_CHECK(hipMemcpy(aDev, aHost, N * sizeof(double), hipMemcpyHostToDevice));
});
// Kernel.
double dt2 = measure([&]() {
// We cannot execute more than maxBlocks blocks, so we split the work
// into multiple launches. That's another reason for using chunks.
int threads = 1024;
int maxBlocks = 65'536;
int blocks = (N + threads - 1) / threads;
for (int i = 0; i < blocks; i += maxBlocks) {
CUDA_LAUNCH(kernel, ::min(maxBlocks, blocks - i), threads,
aDev + i * threads, bDev + i * threads, N);
}
});
// Device -> host.
double dt3 = measure([&]() {
CUDA_CHECK(hipMemcpy(bHost, bDev, N * sizeof(double), hipMemcpyDeviceToHost));
});
checkResults(bHost, N);
printf("sync %s N=%9d upload=%fs kernel=%fs download=%fs total=%fs\n",
kernelName, N, dt1, dt2, dt3, dt1 + dt2 + dt3);
CUDA_CHECK(hipFree(bDev));
CUDA_CHECK(hipFree(aDev));
CUDA_CHECK(hipHostFree(bHost));
CUDA_CHECK(hipHostFree(aHost));
}
/// Selection of runs to use for profiling.
void profile() {
runSync("fastKernel", fastKernel, 100'000'000);
runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4);
runSync("slowKernel", slowKernel, 100'000'000);
runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4);
hipProfilerStop();
}
/// Selection of runs to use for benchmarking.
void runBenchmarks() {
runSync("fastKernel", fastKernel, 1'000'000);
runSync("fastKernel", fastKernel, 100'000'000);
runAsync("fastKernel", fastKernel, 100'000'000, 100'000'000, 1);
runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4);
runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 8);
runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 4);
runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 8);
printf("\n");
runSync("slowKernel", slowKernel, 1'000'000);
runSync("slowKernel", slowKernel, 100'000'000);
runAsync("slowKernel", slowKernel, 100'000'000, 100'000'000, 1);
runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4);
runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 8);
runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 4);
runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 8);
}
int main() {
// Use same streams for all runs.
for (int i = 0; i < 8; ++i)
CUDA_CHECK(hipStreamCreate(&streams[i]));
profile();
// runBenchmarks();
for (int i = 7; i >= 0; --i)
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
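// ---------------------------------------------------------------------------
// Optional sketch (not from the original exercise code): timing a single
// stream with events instead of the host-side measure() wrapper. Events are
// recorded into the stream's queue, so the elapsed time covers exactly the
// work enqueued between the two records; the helper name is hypothetical and
// CUDA_CHECK is assumed to accept hipError_t as it does elsewhere in this file.
// ---------------------------------------------------------------------------
static float timeOneStream(hipStream_t stream) {
    hipEvent_t start, stop;
    CUDA_CHECK(hipEventCreate(&start));
    CUDA_CHECK(hipEventCreate(&stop));
    CUDA_CHECK(hipEventRecord(start, stream));
    // ... enqueue hipMemcpyAsync calls and kernels on `stream` here ...
    CUDA_CHECK(hipEventRecord(stop, stream));
    CUDA_CHECK(hipEventSynchronize(stop));
    float ms = 0.0f;
    CUDA_CHECK(hipEventElapsedTime(&ms, start, stop));
    CUDA_CHECK(hipEventDestroy(start));
    CUDA_CHECK(hipEventDestroy(stop));
    return ms;
}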
72eec33ba7c44d4eed20007dddc32905d0f948bd.cu
#include "utils.h"
#include <cuda_profiler_api.h>
#include <algorithm>
#include <vector>
#include <chrono>
#include <thread>
/// Memory-bound dummy kernel. Do not edit.
__global__ void fastKernel(const double *a, double *b, int M) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= M)
return;
b[idx] = 10.0 * a[idx];
}
/// Compute-bound dummy kernel. Do not edit.
__global__ void slowKernel(const double *a, double *b, int M) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= M)
return;
double x = a[idx];
for (int i = 0; i < 10000; ++i)
x *= 1.01;
b[idx] = (x != 0.1231 ? 10.0 : -1.0) * a[idx];
}
/// Check whether `bHost` contains the correct result. Do not edit.
void checkResults(const double *bHost, int N) {
for (int i = 0; i < N; ++i) {
if (bHost[i] != 100.0 * i) {
printf("Incorrect value for i=%d: value before kernel=%.1f "
"expected after=%.1f now=%.1f\n",
i, 10.0 * i, 100. * i, bHost[i]);
exit(1);
}
}
}
cudaStream_t streams[8];
/// Asynchronously, and in chunks, copy the array to the device, execute the
/// kernel and copy the result back.
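/// Chunk i is issued on stream (i % numStreams), so the host-to-device copy of
/// one chunk can overlap the kernel execution and device-to-host copy of other
/// chunks. The host buffers are allocated with cudaMallocHost (pinned memory),
/// which is what allows cudaMemcpyAsync to genuinely overlap with computation.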
template <typename Kernel>
void runAsync(const char *kernelName, Kernel kernel, int N, int chunkSize, int numStreams) {
double *aHost;
double *bHost;
CUDA_CHECK(cudaMallocHost(&aHost, N * sizeof(double)));
CUDA_CHECK(cudaMallocHost(&bHost, N * sizeof(double)));
for (int i = 0; i < N; ++i)
aHost[i] = 10.0 * i;
// Allocate chunks and create streams.
std::vector<double *> aDev(numStreams);
std::vector<double *> bDev(numStreams);
// std::vector<cudaStream_t> streams(numStreams);
for (int i = 0; i < numStreams; ++i) {
CUDA_CHECK(cudaMalloc(&aDev[i], chunkSize * sizeof(double)));
CUDA_CHECK(cudaMalloc(&bDev[i], chunkSize * sizeof(double)));
}
// Instead of benchmark() we use a simplified measure() which invokes the
// function only once (to get a cleaner profiling information).
double dt = measure([&]() {
// Handle chunk by chunk.
for (int chunk = 0; chunk * chunkSize < N; ++chunk) {
int offset = chunk * chunkSize;
int size = std::min(chunkSize, N - offset);
int stream = chunk % numStreams;
// Host -> device.
CUDA_CHECK(cudaMemcpyAsync(aDev[stream], aHost + offset, size * sizeof(double),
cudaMemcpyHostToDevice, streams[stream]));
// Kernel.
int threads = 1024;
int blocks = (size + threads - 1) / threads;
CUDA_LAUNCH_EX(kernel, blocks, threads, 0, streams[stream],
aDev[stream], bDev[stream], size);
// Device -> host.
CUDA_CHECK(cudaMemcpyAsync(bHost + offset, bDev[stream], size * sizeof(double),
cudaMemcpyDeviceToHost, streams[stream]));
}
// Synchronize. This MUST be within the lambda for the time measurement to work.
for (int i = 0; i < numStreams; ++i)
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
});
checkResults(bHost, N);
printf("async %s N=%9d chunkSize=%9d numStreams=%d time=%fs\n",
kernelName, N, chunkSize, numStreams, dt);
// Destroy streams and deallocate the chunks.
for (int i = numStreams - 1; i >= 0; --i) {
CUDA_CHECK(cudaFree(bDev[i]));
CUDA_CHECK(cudaFree(aDev[i]));
}
CUDA_CHECK(cudaFreeHost(bHost));
CUDA_CHECK(cudaFreeHost(aHost));
}
/// Synchronously copy the whole array to the device, execute the kernel and
/// copy the result back. Do not edit.
template <typename Kernel>
void runSync(const char *kernelName, Kernel kernel, int N) {
double *aHost;
double *bHost;
double *aDev;
double *bDev;
CUDA_CHECK(cudaMallocHost(&aHost, N * sizeof(double)));
CUDA_CHECK(cudaMallocHost(&bHost, N * sizeof(double)));
CUDA_CHECK(cudaMalloc(&aDev, N * sizeof(double)));
CUDA_CHECK(cudaMalloc(&bDev, N * sizeof(double)));
for (int i = 0; i < N; ++i)
aHost[i] = 10.0 * i;
// Host -> device.
double dt1 = measure([&]() {
CUDA_CHECK(cudaMemcpy(aDev, aHost, N * sizeof(double), cudaMemcpyHostToDevice));
});
// Kernel.
double dt2 = measure([&]() {
// We cannot execute more than maxBlocks blocks, so we split the work
// into multiple launches. That's another reason for using chunks.
int threads = 1024;
int maxBlocks = 65'536;
int blocks = (N + threads - 1) / threads;
for (int i = 0; i < blocks; i += maxBlocks) {
CUDA_LAUNCH(kernel, std::min(maxBlocks, blocks - i), threads,
                  aDev + i * threads, bDev + i * threads, N - i * threads);
}
});
// Device -> host.
double dt3 = measure([&]() {
CUDA_CHECK(cudaMemcpy(bHost, bDev, N * sizeof(double), cudaMemcpyDeviceToHost));
});
checkResults(bHost, N);
printf("sync %s N=%9d upload=%fs kernel=%fs download=%fs total=%fs\n",
kernelName, N, dt1, dt2, dt3, dt1 + dt2 + dt3);
CUDA_CHECK(cudaFree(bDev));
CUDA_CHECK(cudaFree(aDev));
CUDA_CHECK(cudaFreeHost(bHost));
CUDA_CHECK(cudaFreeHost(aHost));
}
/// Selection of runs to use for profiling.
void profile() {
runSync("fastKernel", fastKernel, 100'000'000);
runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4);
runSync("slowKernel", slowKernel, 100'000'000);
runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4);
cudaProfilerStop();
}
/// Selection of runs to use for benchmarking.
void runBenchmarks() {
runSync("fastKernel", fastKernel, 1'000'000);
runSync("fastKernel", fastKernel, 100'000'000);
runAsync("fastKernel", fastKernel, 100'000'000, 100'000'000, 1);
runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 4);
runAsync("fastKernel", fastKernel, 100'000'000, 10'000'000, 8);
runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 4);
runAsync("fastKernel", fastKernel, 100'000'000, 1'000'000, 8);
printf("\n");
runSync("slowKernel", slowKernel, 1'000'000);
runSync("slowKernel", slowKernel, 100'000'000);
runAsync("slowKernel", slowKernel, 100'000'000, 100'000'000, 1);
runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 4);
runAsync("slowKernel", slowKernel, 100'000'000, 10'000'000, 8);
runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 4);
runAsync("slowKernel", slowKernel, 100'000'000, 1'000'000, 8);
}
int main() {
// Use same streams for all runs.
for (int i = 0; i < 8; ++i)
CUDA_CHECK(cudaStreamCreate(&streams[i]));
profile();
// runBenchmarks();
for (int i = 7; i >= 0; --i)
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
|
3989f6c32b1ee31baa03fe0b6bd1ff147f0e376c.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>
#include <THH/generic/THHTensor.hip>
#include <THH/THHGenerateAllTypes.h>
#include <THH/generic/THHTensor.hip>
#include <THH/THHGenerateBoolType.h>
#include <THH/generic/THHTensor.hip>
#include <THH/THHGenerateBFloat16Type.h>
| 3989f6c32b1ee31baa03fe0b6bd1ff147f0e376c.cu | #include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <THC/generic/THCTensor.cu>
#include <THC/THCGenerateAllTypes.h>
#include <THC/generic/THCTensor.cu>
#include <THC/THCGenerateBoolType.h>
#include <THC/generic/THCTensor.cu>
#include <THC/THCGenerateBFloat16Type.h>
|
36d50c1ff7c363a99e65f803b622d5a1ea7701a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define BLOCK_SIZE 16
#define PI 3.141592653589793
// GPU subroutines
__global__ void GPU_find_potential(int Nr, int Nz, float dr2, float dz2, float *Phi_0, float *Phi_1, float *ChargeSource);
//////////////////////////////////////////////
int main(void)
{
// atmosphere properties
int Nr, Nz; // grid size [0,...,Nr][0,...,Nz]
float Rmax, Zmax; // grid boundaries (in km)
float dr, dz; // grid cell size
float dr2, dz2; // grid cell size squared
  float *Potential_0, *Potential_1; // electric potential
float *charge_source; // external charge
float zcharge,a,Q; // external charge parameters
// GPU variables
float *GPU_Potential_0, *GPU_Potential_1; // electric potential
float *GPU_charge_source; // external charge
dim3 dimBlock, dimGrid;
int i,k,ik; // counters over the grid
int n, Nn; // iteration
float r,z; // coordinates
// output variables
FILE *DATAOUT;
char filename[255];
// Set GPU
hipSetDevice(0);
// set atmosphere properties
Rmax=300.0; // km
Zmax=100.0; // km
Nr = 300; Nz = 100; // this gives 1km x 1km grid
dr = Rmax/Nr; dr2 = dr*dr;
dz = Zmax/Nz; dz2 = dz*dz;
zcharge = 10.0;
a = 3.0;
Q = 200.0; // total charge
Q = Q/pow(a*sqrt(PI),3.0)/8.854E-12;
Nn=20001; // iterations
  // set variables
Potential_0 = (float *)malloc(sizeof(float)*(Nr+1)*(Nz+1));
Potential_1 = (float *)malloc(sizeof(float)*(Nr+1)*(Nz+1));
charge_source = (float *)malloc(sizeof(float)*(Nr+1)*(Nz+1));
// allocate GPU variables
hipMalloc((void **) &GPU_Potential_0, sizeof(float)*(Nr+1)*(Nz+1));
hipMalloc((void **) &GPU_Potential_1, sizeof(float)*(Nr+1)*(Nz+1));
hipMalloc((void **) &GPU_charge_source, sizeof(float)*(Nr+1)*(Nz+1));
// set the external charge
for (i=0;i<=Nr;i++) for (k=0;k<=Nz;k++) {
ik = (Nr+1)*k+i;
r = i*dr; z = k*dz;
Potential_0[ik]=0.0;
Potential_1[ik]=0.0;
charge_source[ik]=Q*exp(-((z-zcharge)*(z-zcharge)+r*r)/(a*a));
}
// transfer data to GPU
hipMemcpy(GPU_Potential_0, Potential_0, sizeof(float)*(Nr+1)*(Nz+1), hipMemcpyHostToDevice);
hipMemcpy(GPU_Potential_1, Potential_1, sizeof(float)*(Nr+1)*(Nz+1), hipMemcpyHostToDevice);
hipMemcpy(GPU_charge_source, charge_source, sizeof(float)*(Nr+1)*(Nz+1), hipMemcpyHostToDevice);
// run the electric field calculation
dimBlock.x=BLOCK_SIZE;
dimBlock.y=BLOCK_SIZE;
dimGrid.x=(int)(Nr/BLOCK_SIZE)+1;
dimGrid.y=(int)(Nz/BLOCK_SIZE)+1;
for (n=1;n<=Nn;n++) {
if (n%2==1)
hipLaunchKernelGGL(( GPU_find_potential), dim3(dimGrid),dim3(dimBlock), 0, 0, Nr,Nz,dr2,dz2,GPU_Potential_0,GPU_Potential_1,GPU_charge_source);
else
hipLaunchKernelGGL(( GPU_find_potential), dim3(dimGrid),dim3(dimBlock), 0, 0, Nr,Nz,dr2,dz2,GPU_Potential_1,GPU_Potential_0,GPU_charge_source);
hipDeviceSynchronize();
}
// retrieve data from GPU
hipMemcpy(Potential_1, GPU_Potential_1, sizeof(float)*(Nr+1)*(Nz+1), hipMemcpyDeviceToHost);
// data output
sprintf (filename,"GPU_POTENTIAL.dat");
DATAOUT = fopen(filename,"w");
fprintf(DATAOUT,"%d %d\n",Nr,Nz);
for (i=0;i<=Nr;i++) {
for (k=0;k<=Nz;k++) {
ik = (Nr+1)*k+i;
fprintf(DATAOUT,"%E ",Potential_1[ik]);
}
fprintf(DATAOUT,"\n");
}
fclose(DATAOUT);
// free the memory
free(Potential_0);
free(Potential_1);
free(charge_source);
hipFree(GPU_Potential_0);
hipFree(GPU_Potential_1);
hipFree(GPU_charge_source);
}
/////////////////////////////////////////////////////
/////////////////////////////////////////////////////
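// The kernel below performs one Jacobi-style relaxation sweep for the
// axisymmetric Poisson equation on the r-z grid. Away from the axis (i > 0)
// the update implemented by the branches below is
//   Phi_new = [ dz2*(1 - 0.5/i)*Phi(i-1,k) + dz2*(1 + 0.5/i)*Phi(i+1,k)
//             + dr2*Phi(i,k-1) + dr2*Phi(i,k+1) + ChargeSource*dr2*dz2 ] / (2*(dr2 + dz2)),
// where the (1 -/+ 0.5/i) factors come from the cylindrical (1/r) d/dr(r dPhi/dr)
// term; on the axis (i == 0) the r+1 neighbour is weighted by 4*dz2 and the
// denominator becomes 2*(dr2 + 2*dz2).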
__global__ void GPU_find_potential(int Nr, int Nz, float dr2, float dz2, float *Phi_0, float *Phi_1, float *ChargeSource){
  // locally shared data for this block
__shared__ float PhiBlock[BLOCK_SIZE+2][BLOCK_SIZE+2];
// updated potential
float Phi;
// starting index for this block
int i0 = BLOCK_SIZE * blockIdx.x;
int k0 = BLOCK_SIZE * blockIdx.y;
// index of this thread in the r-z grid
int it = threadIdx.x;
int kt = threadIdx.y;
int i = i0 + it;
int k = k0 + kt;
int ik = (Nr+1)*k + i;
// check if the point is within the r-z grid
if (i<=Nr && k<=Nz) {
// get the data from global into the shared memory
PhiBlock[it+1][kt+1]=Phi_0[ik];
if (i==0) PhiBlock[it][kt+1]=0.0;
else if (it==0) PhiBlock[it][kt+1]=Phi_0[ik-1];
if (i==Nr) PhiBlock[it+2][kt+1]=0.0;
else if (it==BLOCK_SIZE-1) PhiBlock[it+2][kt+1]=Phi_0[ik+1];
if (k==0) PhiBlock[it+1][kt]=0.0;
else if (kt==0) PhiBlock[it+1][kt]=Phi_0[ik-(Nr+1)];
if (k==Nz) PhiBlock[it+1][kt+2]=0.0;
else if (kt==BLOCK_SIZE-1) PhiBlock[it+1][kt+2]=Phi_0[ik+(Nr+1)];
}
  // synchronize threads to make sure that all data is loaded
// (We exit "if" because this has to be done for all threads within the block)
__syncthreads();
if (i<=Nr && k<=Nz) {
// calculate the updated potential
if (k==0 || k==Nz) Phi = 0.0; // forced by boundary condition
else if (i>0) {
Phi=PhiBlock[it][kt+1]*dz2*(1.0-0.5/i);
Phi += PhiBlock[it+1][kt]*dr2;
Phi += PhiBlock[it+2][kt+1]*dz2*(1.0+0.5/i);
Phi += PhiBlock[it+1][kt+2]*dr2;
Phi += ChargeSource[ik]*dr2*dz2;
Phi /= (2*(dr2+dz2));
}
else {
Phi = PhiBlock[it+1][kt]*dr2;
Phi += PhiBlock[it+2][kt+1]*dz2*4;
Phi += PhiBlock[it+1][kt+2]*dr2;
Phi += ChargeSource[ik]*dr2*dz2;
Phi /= (2*(dr2+2*dz2));
}
// store the result
Phi_1[ik]=Phi;
} // end of: if (i<=Nr && k<=Nz)
} | 36d50c1ff7c363a99e65f803b622d5a1ea7701a5.cu | #include <stdio.h>
#include <cuda.h>
#include <math.h>
#define BLOCK_SIZE 16
#define PI 3.141592653589793
// GPU subroutines
__global__ void GPU_find_potential(int Nr, int Nz, float dr2, float dz2, float *Phi_0, float *Phi_1, float *ChargeSource);
//////////////////////////////////////////////
int main(void)
{
// atmosphere properties
int Nr, Nz; // grid size [0,...,Nr][0,...,Nz]
float Rmax, Zmax; // grid boundaries (in km)
float dr, dz; // grid cell size
float dr2, dz2; // grid cell size squared
  float *Potential_0, *Potential_1; // electric potential
float *charge_source; // external charge
float zcharge,a,Q; // external charge parameters
// GPU variables
float *GPU_Potential_0, *GPU_Potential_1; // electric potential
float *GPU_charge_source; // external charge
dim3 dimBlock, dimGrid;
int i,k,ik; // counters over the grid
int n, Nn; // iteration
float r,z; // coordinates
// output variables
FILE *DATAOUT;
char filename[255];
// Set GPU
cudaSetDevice(0);
// set atmosphere properties
Rmax=300.0; // km
Zmax=100.0; // km
Nr = 300; Nz = 100; // this gives 1km x 1km grid
dr = Rmax/Nr; dr2 = dr*dr;
dz = Zmax/Nz; dz2 = dz*dz;
zcharge = 10.0;
a = 3.0;
Q = 200.0; // total charge
Q = Q/pow(a*sqrt(PI),3.0)/8.854E-12;
Nn=20001; // iterations
  // set variables
Potential_0 = (float *)malloc(sizeof(float)*(Nr+1)*(Nz+1));
Potential_1 = (float *)malloc(sizeof(float)*(Nr+1)*(Nz+1));
charge_source = (float *)malloc(sizeof(float)*(Nr+1)*(Nz+1));
// allocate GPU variables
cudaMalloc((void **) &GPU_Potential_0, sizeof(float)*(Nr+1)*(Nz+1));
cudaMalloc((void **) &GPU_Potential_1, sizeof(float)*(Nr+1)*(Nz+1));
cudaMalloc((void **) &GPU_charge_source, sizeof(float)*(Nr+1)*(Nz+1));
// set the external charge
for (i=0;i<=Nr;i++) for (k=0;k<=Nz;k++) {
ik = (Nr+1)*k+i;
r = i*dr; z = k*dz;
Potential_0[ik]=0.0;
Potential_1[ik]=0.0;
charge_source[ik]=Q*exp(-((z-zcharge)*(z-zcharge)+r*r)/(a*a));
}
// transfer data to GPU
cudaMemcpy(GPU_Potential_0, Potential_0, sizeof(float)*(Nr+1)*(Nz+1), cudaMemcpyHostToDevice);
cudaMemcpy(GPU_Potential_1, Potential_1, sizeof(float)*(Nr+1)*(Nz+1), cudaMemcpyHostToDevice);
cudaMemcpy(GPU_charge_source, charge_source, sizeof(float)*(Nr+1)*(Nz+1), cudaMemcpyHostToDevice);
// run the electric field calculation
dimBlock.x=BLOCK_SIZE;
dimBlock.y=BLOCK_SIZE;
dimGrid.x=(int)(Nr/BLOCK_SIZE)+1;
dimGrid.y=(int)(Nz/BLOCK_SIZE)+1;
for (n=1;n<=Nn;n++) {
if (n%2==1)
GPU_find_potential<<<dimGrid,dimBlock>>>(Nr,Nz,dr2,dz2,GPU_Potential_0,GPU_Potential_1,GPU_charge_source);
else
GPU_find_potential<<<dimGrid,dimBlock>>>(Nr,Nz,dr2,dz2,GPU_Potential_1,GPU_Potential_0,GPU_charge_source);
cudaThreadSynchronize();
}
// retrieve data from GPU
cudaMemcpy(Potential_1, GPU_Potential_1, sizeof(float)*(Nr+1)*(Nz+1), cudaMemcpyDeviceToHost);
// data output
sprintf (filename,"GPU_POTENTIAL.dat");
DATAOUT = fopen(filename,"w");
fprintf(DATAOUT,"%d %d\n",Nr,Nz);
for (i=0;i<=Nr;i++) {
for (k=0;k<=Nz;k++) {
ik = (Nr+1)*k+i;
fprintf(DATAOUT,"%E ",Potential_1[ik]);
}
fprintf(DATAOUT,"\n");
}
fclose(DATAOUT);
// free the memory
free(Potential_0);
free(Potential_1);
free(charge_source);
cudaFree(GPU_Potential_0);
cudaFree(GPU_Potential_1);
cudaFree(GPU_charge_source);
}
/////////////////////////////////////////////////////
/////////////////////////////////////////////////////
__global__ void GPU_find_potential(int Nr, int Nz, float dr2, float dz2, float *Phi_0, float *Phi_1, float *ChargeSource){
// localy shared data for this block
__shared__ float PhiBlock[BLOCK_SIZE+2][BLOCK_SIZE+2];
// updated potential
float Phi;
// starting index for this block
int i0 = BLOCK_SIZE * blockIdx.x;
int k0 = BLOCK_SIZE * blockIdx.y;
// index of this thread in the r-z grid
int it = threadIdx.x;
int kt = threadIdx.y;
int i = i0 + it;
int k = k0 + kt;
int ik = (Nr+1)*k + i;
// check if the point is within the r-z grid
if (i<=Nr && k<=Nz) {
// get the data from global into the shared memory
PhiBlock[it+1][kt+1]=Phi_0[ik];
if (i==0) PhiBlock[it][kt+1]=0.0;
else if (it==0) PhiBlock[it][kt+1]=Phi_0[ik-1];
if (i==Nr) PhiBlock[it+2][kt+1]=0.0;
else if (it==BLOCK_SIZE-1) PhiBlock[it+2][kt+1]=Phi_0[ik+1];
if (k==0) PhiBlock[it+1][kt]=0.0;
else if (kt==0) PhiBlock[it+1][kt]=Phi_0[ik-(Nr+1)];
if (k==Nz) PhiBlock[it+1][kt+2]=0.0;
else if (kt==BLOCK_SIZE-1) PhiBlock[it+1][kt+2]=Phi_0[ik+(Nr+1)];
}
  // synchronize threads to make sure that all data is loaded
// (We exit "if" because this has to be done for all threads within the block)
__syncthreads();
if (i<=Nr && k<=Nz) {
// calculate the updated potential
if (k==0 || k==Nz) Phi = 0.0; // forced by boundary condition
else if (i>0) {
Phi=PhiBlock[it][kt+1]*dz2*(1.0-0.5/i);
Phi += PhiBlock[it+1][kt]*dr2;
Phi += PhiBlock[it+2][kt+1]*dz2*(1.0+0.5/i);
Phi += PhiBlock[it+1][kt+2]*dr2;
Phi += ChargeSource[ik]*dr2*dz2;
Phi /= (2*(dr2+dz2));
}
else {
Phi = PhiBlock[it+1][kt]*dr2;
Phi += PhiBlock[it+2][kt+1]*dz2*4;
Phi += PhiBlock[it+1][kt+2]*dr2;
Phi += ChargeSource[ik]*dr2*dz2;
Phi /= (2*(dr2+2*dz2));
}
// store the result
Phi_1[ik]=Phi;
} // end of: if (i<=Nr && k<=Nz)
} |
0edb3fd717aeead26220f1abfded94b12dd48b2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#define N 10
__global__ void transpose(int transpose[][N], int matrix[][N], int matrixSize){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= matrixSize || y>= matrixSize)
return;
printf("%d \n", matrix[x][y]);
//int from = x + y * matrixSize;
//int to = y + x * matrixSize;
transpose[y][x] = matrix[x][y];
}
int main(){
int A[N][N] = {{1,1,1,1,1,1,1,1,1,1,},{2,2,2,2,2,2,2,2,2,2},{3,3,3,3,3,3,3,3,3,3},{1,1,1,1,1,1,1,1,1,1,},{2,2,2,2,2,2,2,2,2,2},{3,3,3,3,3,3,3,3,3,3},{1,1,1,1,1,1,1,1,1,1,},{2,2,2,2,2,2,2,2,2,2},{3,3,3,3,3,3,3,3,3,3},{1,1,1,1,1,1,1,1,1,1}};
int T[N][N] = {{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0}};
int (*pA)[N], (*pT)[N];
hipMalloc((void**)&pA, (N*N)*sizeof(int));
hipMalloc((void**)&pT, (N*N)*sizeof(int));
hipMemcpy(pA, A, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(pT, T, (N*N)*sizeof(int), hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
  hipLaunchKernelGGL(( transpose), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, pT,pA,N*N);
hipMemcpy(T, pT, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
int i, j; printf("A = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", A[i][j]);
}
printf("\n");
}
printf("T = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", T[i][j]);
}
printf("\n");
}
hipFree(pA);
hipFree(pT);
printf("\n");
return 0;
} | 0edb3fd717aeead26220f1abfded94b12dd48b2c.cu | #include <stdio.h>
#include <assert.h>
#define N 10
__global__ void transpose(int transpose[][N], int matrix[][N], int matrixSize){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= matrixSize || y>= matrixSize)
return;
printf("%d \n", matrix[x][y]);
//int from = x + y * matrixSize;
//int to = y + x * matrixSize;
transpose[y][x] = matrix[x][y];
}
int main(){
int A[N][N] = {{1,1,1,1,1,1,1,1,1,1,},{2,2,2,2,2,2,2,2,2,2},{3,3,3,3,3,3,3,3,3,3},{1,1,1,1,1,1,1,1,1,1,},{2,2,2,2,2,2,2,2,2,2},{3,3,3,3,3,3,3,3,3,3},{1,1,1,1,1,1,1,1,1,1,},{2,2,2,2,2,2,2,2,2,2},{3,3,3,3,3,3,3,3,3,3},{1,1,1,1,1,1,1,1,1,1}};
int T[N][N] = {{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0}};
int (*pA)[N], (*pT)[N];
cudaMalloc((void**)&pA, (N*N)*sizeof(int));
cudaMalloc((void**)&pT, (N*N)*sizeof(int));
cudaMemcpy(pA, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(pT, T, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
  transpose<<<numBlocks,threadsPerBlock>>>(pT,pA,N*N);
cudaMemcpy(T, pT, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
int i, j; printf("A = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", A[i][j]);
}
printf("\n");
}
printf("T = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", T[i][j]);
}
printf("\n");
}
cudaFree(pA);
cudaFree(pT);
printf("\n");
return 0;
} |
33fc54a8ea7680a06dd7f6bdde9979cfb2db0e41.hip | // !!! This is a file automatically generated by hipify!!!
#include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
#ifdef USE_ROCM
#ifndef MATAZURE_CUDA
#error "does not support cuda"
#endif
#endif
#ifdef _OPENMP
#define USE_OPENMP
#endif
typedef point<byte, 3> rgb;
int main(int argc, char *argv[]) {
auto output_mandelbrot_path = argc > 1 ? argv[1] : "mandelbrot.png";
    // Set the maximum number of iterations; a point exceeding max_iteration is treated as non-convergent
int_t max_iteration = 256 * 16;
    // Color mapping function
auto color_fun = [max_iteration] __matazure__ (int_t i) -> rgb {
        // t is the iteration count i normalized to [0, 1]
float t = float(i) / max_iteration;
auto r = static_cast<byte>(36*(1-t)*t*t*t*255);
auto g = static_cast<byte>(60*(1-t)*(1-t)*t*t*255);
auto b = static_cast<byte>(38*(1-t)*(1-t)*(1-t)*t*255);
return rgb{ r, g, b };
};
    // Size of the region to render
pointi<2> shape = { 2048, 2048 };
    // Mandelbrot operator
auto mandelbrot_fun = [=] __matazure__ (pointi<2> idx)->rgb {
pointf<2> c = point_cast<float>(idx) / point_cast<float>(shape) * pointf<2>{3.25f, 2.5f} -pointf<2>{2.0f, 1.25f};
auto z = pointf<2>::all(0.0f);
auto norm = 0.0f;
int_t i = 0;
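        // Standard escape-time iteration: z <- z^2 + c until |z|^2 exceeds 4
        // or max_iteration is reached.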
while (norm <= 4.0f && i < max_iteration) {
float tmp = z[0] * z[0] - z[1] * z[1] + c[0];
z[1] = 2 * z[0] * z[1] + c[1];
z[0] = tmp;
++i;
norm = z[0] * z[0] + z[1] * z[1];
}
        // Return the rgb pixel value directly
return color_fun(i);
};
#ifdef USE_ROCM
    // Construct a lambda tensor from shape and mandelbrot_fun
auto cts_mandelbrot_rgb = make_lambda(shape, mandelbrot_fun, device_tag{}).persist();
auto ts_mandelbrot_rgb = mem_clone(cts_mandelbrot_rgb, host_tag{});
#else
#ifdef USE_OPENMP
    // Use the omp_policy parallel policy
auto ts_mandelbrot_rgb = make_lambda(shape, mandelbrot_fun, host_tag{}).persist(omp_policy{});
#else
auto ts_mandelbrot_rgb = make_lambda(shape, mandelbrot_fun, host_tag{}).persist();
#endif
#endif
write_rgb_png(output_mandelbrot_path, ts_mandelbrot_rgb);
return 0;
}
| 33fc54a8ea7680a06dd7f6bdde9979cfb2db0e41.cu | #include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
#ifdef USE_CUDA
#ifndef MATAZURE_CUDA
#error "does not support cuda"
#endif
#endif
#ifdef _OPENMP
#define USE_OPENMP
#endif
typedef point<byte, 3> rgb;
int main(int argc, char *argv[]) {
auto output_mandelbrot_path = argc > 1 ? argv[1] : "mandelbrot.png";
    //Set the maximum number of iterations; a point exceeding max_iteration is treated as non-convergent
int_t max_iteration = 256 * 16;
    //Color mapping function
auto color_fun = [max_iteration] __matazure__ (int_t i) -> rgb {
        //t is the iteration count i normalized to [0, 1]
float t = float(i) / max_iteration;
auto r = static_cast<byte>(36*(1-t)*t*t*t*255);
auto g = static_cast<byte>(60*(1-t)*(1-t)*t*t*255);
auto b = static_cast<byte>(38*(1-t)*(1-t)*(1-t)*t*255);
return rgb{ r, g, b };
};
    //Size of the region to render
pointi<2> shape = { 2048, 2048 };
    //Mandelbrot operator
auto mandelbrot_fun = [=] __matazure__ (pointi<2> idx)->rgb {
pointf<2> c = point_cast<float>(idx) / point_cast<float>(shape) * pointf<2>{3.25f, 2.5f} -pointf<2>{2.0f, 1.25f};
auto z = pointf<2>::all(0.0f);
auto norm = 0.0f;
int_t i = 0;
while (norm <= 4.0f && i < max_iteration) {
float tmp = z[0] * z[0] - z[1] * z[1] + c[0];
z[1] = 2 * z[0] * z[1] + c[1];
z[0] = tmp;
++i;
norm = z[0] * z[0] + z[1] * z[1];
}
        //Return the rgb pixel value directly
return color_fun(i);
};
#ifdef USE_CUDA
    //Construct a lambda tensor from shape and mandelbrot_fun
auto cts_mandelbrot_rgb = make_lambda(shape, mandelbrot_fun, device_tag{}).persist();
auto ts_mandelbrot_rgb = mem_clone(cts_mandelbrot_rgb, host_tag{});
#else
#ifdef USE_OPENMP
    //Use the omp_policy parallel policy
auto ts_mandelbrot_rgb = make_lambda(shape, mandelbrot_fun, host_tag{}).persist(omp_policy{});
#else
auto ts_mandelbrot_rgb = make_lambda(shape, mandelbrot_fun, host_tag{}).persist();
#endif
#endif
write_rgb_png(output_mandelbrot_path, ts_mandelbrot_rgb);
return 0;
}
|
7086d2497665c2b9e577a5f836562815fa54dd21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__ void rgba_to_greyscale(const uchar4* const rgbimg,unsigned char* const greyscaleimg,int nr, int nc)
{
int xindex = threadIdx.x;
int yindex = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int xdimg = gridDim.x;
int ydimg = gridDim.y;
int xdimb = blockDim.x;
int ydimb = blockDim.y;
int posx = xdimb * bx + xindex;
int posy = ydimb * by + yindex;
int off1d = posy * (xdimb * xdimg) + posx;
uchar4 rgba = rgbimg[off1d];
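  // Weighted channel sum using the ITU-R BT.601 luma coefficients
  // (0.299, 0.587, 0.114).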
float csum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyscaleimg[off1d] = csum;
}
void convert_to_gs(const uchar4 * const h_rgbaImage, uchar4 * const rbgimg_d,
unsigned char* const greyscale_d, size_t nr, size_t nc)
{
const dim3 bs(nr/16+1, nc/16+1, 1);
const dim3 gs( 16, 16, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gs), dim3(bs), 0, 0, rbgimg_d, greyscale_d, nr, nc);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
} | 7086d2497665c2b9e577a5f836562815fa54dd21.cu | #include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__ void rgba_to_greyscale(const uchar4* const rgbimg,unsigned char* const greyscaleimg,int nr, int nc)
{
int xindex = threadIdx.x;
int yindex = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int xdimg = gridDim.x;
int ydimg = gridDim.y;
int xdimb = blockDim.x;
int ydimb = blockDim.y;
int posx = xdimb * bx + xindex;
int posy = ydimb * by + yindex;
int off1d = posy * (xdimb * xdimg) + posx;
uchar4 rgba = rgbimg[off1d];
float csum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyscaleimg[off1d] = csum;
}
void convert_to_gs(const uchar4 * const h_rgbaImage, uchar4 * const rbgimg_d,
unsigned char* const greyscale_d, size_t nr, size_t nc)
{
const dim3 bs(nr/16+1, nc/16+1, 1);
const dim3 gs( 16, 16, 1);
rgba_to_greyscale<<<gs, bs>>>(rbgimg_d, greyscale_d, nr, nc);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
} |
f6d3ffdbd665f3036c49db1df771c1f1e9dde5c9.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>
#include "solver.h"
using namespace std;
typedef unsigned char uchar;
int num_train = 512, num_test = 500;
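// MNIST IDX files store their header integers big-endian; reverseInt swaps the
// byte order so the headers can be read on a little-endian host.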
int reverseInt(int n) {
int bytes = 4;
unsigned char ch[bytes];
for (int i = 0; i < bytes; i++) {
ch[i] = (n >> i * 8) & 255;
}
int p = 0;
for (int i = 0; i < bytes; i++) {
p += (int)ch[i] << (bytes - i - 1) * 8;
}
return p;
}
void readMNIST(vector<vector<uchar> > &train_images,
vector<vector<uchar> > &test_images, vector<uchar> &train_labels,
vector<uchar> &test_labels) {
string filename_train_images = "data/train-images.idx3-ubyte";
string filename_train_labels = "data/train-labels.idx1-ubyte";
string filename_test_images = "data/t10k-images.idx3-ubyte";
string filename_test_labels = "data/t10k-labels.idx1-ubyte";
// read train/test images
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_images;
else
filename = filename_test_images;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0;
f.read((char *)&magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *)&n_images, sizeof(n_images));
n_images = reverseInt(n_images);
f.read((char *)&n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
f.read((char *)&n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int k = 0; k < n_images; k++) {
vector<uchar> temp;
temp.reserve(n_rows * n_cols);
for (int j = 0; j < n_rows * n_cols; j++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
temp.push_back(t);
}
if (i == 0)
train_images.push_back(temp);
else
test_images.push_back(temp);
}
f.close();
}
// read train/test labels
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_labels;
else
filename = filename_test_labels;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_labels = 0;
f.read((char *)&magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *)&n_labels, sizeof(n_labels));
n_labels = reverseInt(n_labels);
for (int k = 0; k < n_labels; k++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
if (i == 0)
train_labels.push_back(t);
else
test_labels.push_back(t);
}
f.close();
}
}
void printTimes(vector<float> &time, string filename);
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag,
vector<vector<float> > &bwd_vdnn_lag, string filename);
int main(int argc, char *argv[]) {
// int num_train = 100 * batch_size, num_val = batch_size;
// void *X_train = malloc(num_train * input_channels * sizeof(float));
// int *y_train = (int *)malloc(num_train * sizeof(int));
// void *X_val = malloc(num_val * input_channels * sizeof(float));
// int *y_val = (int *)malloc(num_val * sizeof(int));
// for (int i = 0; i < num_train; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 /
// 1000;
// y_train[i] = 0;
// }
// for (int i = 0; i < num_val; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 /
// 1000;
// y_val[i] = rand() % 2;
// }
// int rows = 28, cols = 28, channels = 1;
// vector<vector<uchar> > train_images, test_images;
// vector<uchar> train_labels, test_labels;
// readMNIST(train_images, test_images, train_labels, test_labels);
// float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels;
float *f_train_images, *f_test_images;
int *f_train_labels, *f_test_labels;
int rows = 227, cols = 227, channels = 3;
int input_size = rows * cols * channels;
// f_train_images = (float *)malloc(num_train * input_size * sizeof(float));
// f_train_labels = (int *)malloc(num_train * sizeof(int));
checkCudaErrors(
hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float)));
checkCudaErrors(hipHostMalloc(&f_train_labels, num_train * sizeof(int)));
f_test_images = (float *)malloc(num_test * input_size * sizeof(float));
f_test_labels = (int *)malloc(num_test * sizeof(int));
float *mean_image;
mean_image = (float *)malloc(input_size * sizeof(float));
for (int i = 0; i < input_size; i++) {
mean_image[i] = 0;
for (int k = 0; k < num_train; k++) {
mean_image[i] += f_train_images[k * input_size + i];
}
mean_image[i] /= num_train;
}
for (int i = 0; i < num_train; i++) {
for (int j = 0; j < input_size; j++) {
f_train_images[i * input_size + j] -= mean_image[j];
}
}
for (int i = 0; i < num_test; i++) {
for (int j = 0; j < input_size; j++) {
f_test_images[i * input_size + j] -= mean_image[j];
}
}
// int input_channels = rows * cols * channels * 3, hidden_channels1 = 50,
// hidden_channels2 = 100, output_channels = 10;
// vector<LayerSpecifier> layer_specifier;
// ConvDescriptor layer0;
// LayerSpecifier temp;
// layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// ActivationDescriptor layer0_actv;
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// BatchNormDescriptor layer0_bn;
// for (int i = 0; i < 200; i++) {
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// }
// PoolingDescriptor layer0_pool;
// layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX);
// temp.initPointer(POOLING);
// *((PoolingDescriptor *)temp.params) = layer0_pool;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols
// / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// // DropoutDescriptor layer0_dropout;
// // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2);
// // temp.initPointer(DROPOUT);
// // *((DropoutDescriptor *)temp.params) = layer0_dropout;
// // layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols
// / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// FCDescriptor layer1;
// layer1.initializeValues(input_channels, hidden_channels1);
// temp.initPointer(FULLY_CONNECTED);
// *((FCDescriptor *)(temp.params)) = layer1;
// layer_specifier.push_back(temp);
// temp.initPointer(ACTV);
// ActivationDescriptor layer1_actv;
// layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1);
// *((ActivationDescriptor *)temp.params) = layer1_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1,
// hidden_channels1, 1, 1);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// temp.initPointer(FULLY_CONNECTED);
// FCDescriptor layer2;
// layer2.initializeValues(hidden_channels1, output_channels);
// *((FCDescriptor *)temp.params) = layer2;
// layer_specifier.push_back(temp);
// // temp.initPointer(FULLY_CONNECTED);
// // FCDescriptor layer3;
// // layer3.initializeValues(hidden_channels2, output_channels);
// // *((FCDescriptor *)temp.params) = layer3;
// // layer_specifier.push_back(temp);
// temp.initPointer(SOFTMAX);
// SoftmaxDescriptor smax;
// smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE,
// output_channels, 1, 1);
// *((SoftmaxDescriptor *)(temp.params)) = smax;
// layer_specifier.push_back(temp);
// AlexNet
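  // The layer_specifier built below follows the standard AlexNet topology:
  // conv 11x11/stride 4 -> max pool -> conv 5x5 -> max pool -> three 3x3 convs
  // -> max pool -> FC 9216-4096 -> FC 4096-4096 -> FC 4096-1000 -> softmax.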
vector<LayerSpecifier> layer_specifier;
{
ConvDescriptor layer0;
layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer0;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer1;
layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer1;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer2;
layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer2;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer3;
layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer3;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer4;
layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer4;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer5;
layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer5;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer6;
layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer6;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer7;
layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer7;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer8;
layer8.initializeValues(9216, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer8;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer9;
layer9.initializeValues(4096, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer9;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer10;
layer10.initializeValues(4096, 1000);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer10;
layer_specifier.push_back(temp);
}
{
SoftmaxDescriptor layer11;
layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1,
1);
LayerSpecifier temp;
temp.initPointer(SOFTMAX);
*((SoftmaxDescriptor *)temp.params) = layer11;
layer_specifier.push_back(temp);
}
vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
vDNNType vdnn_type = vDNN_DYN;
string filename("vdnn_dyn");
if (argc == 3) {
filename.assign("vdnn");
// argv[1] - layers to offload, argv[2] - conv algo to use
if (strcmp(argv[1], "dyn") == 0) {
vdnn_type = vDNN_DYN;
filename.append("_dyn");
} else if (strcmp(argv[1], "conv") == 0) {
vdnn_type = vDNN_CONV;
filename.append("_conv");
} else if (strcmp(argv[1], "all") == 0) {
vdnn_type = vDNN_ALL;
filename.append("_all");
} else if (strcmp(argv[1], "alternate_conv") == 0) {
vdnn_type = vDNN_ALTERNATE_CONV;
filename.append("_alternate_conv");
} else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or
strcmp(argv[1], "alternate_conv") == 0)) {
if (strcmp(argv[2], "p") == 0) {
vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
filename.append("_p");
} else if (strcmp(argv[2], "m") == 0) {
vdnn_conv_algo = vDNN_MEMORY_OPTIMAL;
filename.append("_m");
} else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
}
}
int batch_size = 256;
long long dropout_seed = 1;
float softmax_eps = 1e-8;
float init_std_dev = 0.1;
NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW,
dropout_seed, softmax_eps, init_std_dev, vdnn_type,
vdnn_conv_algo, SGD);
int num_epoch = 1000;
double learning_rate = 1e-3;
double learning_rate_decay = 0.9;
Solver solver(&net, (void *)f_train_images, f_train_labels,
(void *)f_train_images, f_train_labels, num_epoch, SGD,
learning_rate, learning_rate_decay, num_train, num_train);
vector<float> loss;
vector<float> time;
vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag;
solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag);
printTimes(time, filename);
printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename);
}
void printTimes(vector<float> &time, string filename) {
float mean_time = 0.0;
float std_dev = 0.0;
int N = time.size();
for (int i = 0; i < N; i++) {
mean_time += time[i];
}
mean_time /= N;
for (int i = 0; i < N; i++) {
std_dev += pow(time[i] - mean_time, 2);
}
std_dev /= N;
std_dev = pow(std_dev, 0.5);
cout << "Average time: " << mean_time << endl;
cout << "Standard deviation: " << std_dev << endl;
filename.append(".dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
for (int i = 0; i < N; i++) {
f << time[i] << endl;
}
f << "mean_time: " << mean_time << endl;
f << "standard_deviation: " << std_dev << endl;
f.close();
filename.append(".bin");
fstream f_bin;
f_bin.open(filename.c_str(), ios_base::out);
f_bin.write((char *)&N, sizeof(N));
for (int i = 0; i < N; i++) {
f_bin.write((char *)&time[i], sizeof(time[i]));
}
f_bin.close();
}
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag,
vector<vector<float> > &bwd_vdnn_lag, string filename) {
filename.append("_lag.dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
int N = fwd_vdnn_lag.size();
for (int i = 0; i < N; i++) {
for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) {
f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl;
}
for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) {
f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl;
}
f << endl;
}
f.close();
} | f6d3ffdbd665f3036c49db1df771c1f1e9dde5c9.cu | #include <cmath>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>
#include "solver.h"
using namespace std;
typedef unsigned char uchar;
int num_train = 512, num_test = 500;
int reverseInt(int n) {
int bytes = 4;
unsigned char ch[bytes];
for (int i = 0; i < bytes; i++) {
ch[i] = (n >> i * 8) & 255;
}
int p = 0;
for (int i = 0; i < bytes; i++) {
p += (int)ch[i] << (bytes - i - 1) * 8;
}
return p;
}
void readMNIST(vector<vector<uchar> > &train_images,
vector<vector<uchar> > &test_images, vector<uchar> &train_labels,
vector<uchar> &test_labels) {
string filename_train_images = "data/train-images.idx3-ubyte";
string filename_train_labels = "data/train-labels.idx1-ubyte";
string filename_test_images = "data/t10k-images.idx3-ubyte";
string filename_test_labels = "data/t10k-labels.idx1-ubyte";
// read train/test images
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_images;
else
filename = filename_test_images;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0;
f.read((char *)&magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *)&n_images, sizeof(n_images));
n_images = reverseInt(n_images);
f.read((char *)&n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
f.read((char *)&n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int k = 0; k < n_images; k++) {
vector<uchar> temp;
temp.reserve(n_rows * n_cols);
for (int j = 0; j < n_rows * n_cols; j++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
temp.push_back(t);
}
if (i == 0)
train_images.push_back(temp);
else
test_images.push_back(temp);
}
f.close();
}
// read train/test labels
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_labels;
else
filename = filename_test_labels;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_labels = 0;
f.read((char *)&magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *)&n_labels, sizeof(n_labels));
n_labels = reverseInt(n_labels);
for (int k = 0; k < n_labels; k++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
if (i == 0)
train_labels.push_back(t);
else
test_labels.push_back(t);
}
f.close();
}
}
void printTimes(vector<float> &time, string filename);
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag,
vector<vector<float> > &bwd_vdnn_lag, string filename);
int main(int argc, char *argv[]) {
// int num_train = 100 * batch_size, num_val = batch_size;
// void *X_train = malloc(num_train * input_channels * sizeof(float));
// int *y_train = (int *)malloc(num_train * sizeof(int));
// void *X_val = malloc(num_val * input_channels * sizeof(float));
// int *y_val = (int *)malloc(num_val * sizeof(int));
// for (int i = 0; i < num_train; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 /
// 1000;
// y_train[i] = 0;
// }
// for (int i = 0; i < num_val; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 /
// 1000;
// y_val[i] = rand() % 2;
// }
// int rows = 28, cols = 28, channels = 1;
// vector<vector<uchar> > train_images, test_images;
// vector<uchar> train_labels, test_labels;
// readMNIST(train_images, test_images, train_labels, test_labels);
// float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels;
float *f_train_images, *f_test_images;
int *f_train_labels, *f_test_labels;
int rows = 227, cols = 227, channels = 3;
int input_size = rows * cols * channels;
// f_train_images = (float *)malloc(num_train * input_size * sizeof(float));
// f_train_labels = (int *)malloc(num_train * sizeof(int));
checkCudaErrors(
cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float)));
checkCudaErrors(cudaMallocHost(&f_train_labels, num_train * sizeof(int)));
f_test_images = (float *)malloc(num_test * input_size * sizeof(float));
f_test_labels = (int *)malloc(num_test * sizeof(int));
float *mean_image;
mean_image = (float *)malloc(input_size * sizeof(float));
for (int i = 0; i < input_size; i++) {
mean_image[i] = 0;
for (int k = 0; k < num_train; k++) {
mean_image[i] += f_train_images[k * input_size + i];
}
mean_image[i] /= num_train;
}
for (int i = 0; i < num_train; i++) {
for (int j = 0; j < input_size; j++) {
f_train_images[i * input_size + j] -= mean_image[j];
}
}
for (int i = 0; i < num_test; i++) {
for (int j = 0; j < input_size; j++) {
f_test_images[i * input_size + j] -= mean_image[j];
}
}
// int input_channels = rows * cols * channels * 3, hidden_channels1 = 50,
// hidden_channels2 = 100, output_channels = 10;
// vector<LayerSpecifier> layer_specifier;
// ConvDescriptor layer0;
// LayerSpecifier temp;
// layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// ActivationDescriptor layer0_actv;
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// BatchNormDescriptor layer0_bn;
// for (int i = 0; i < 200; i++) {
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// }
// PoolingDescriptor layer0_pool;
// layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX);
// temp.initPointer(POOLING);
// *((PoolingDescriptor *)temp.params) = layer0_pool;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols
// / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// // DropoutDescriptor layer0_dropout;
// // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2);
// // temp.initPointer(DROPOUT);
// // *((DropoutDescriptor *)temp.params) = layer0_dropout;
// // layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols
// / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// FCDescriptor layer1;
// layer1.initializeValues(input_channels, hidden_channels1);
// temp.initPointer(FULLY_CONNECTED);
// *((FCDescriptor *)(temp.params)) = layer1;
// layer_specifier.push_back(temp);
// temp.initPointer(ACTV);
// ActivationDescriptor layer1_actv;
// layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1);
// *((ActivationDescriptor *)temp.params) = layer1_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1,
// hidden_channels1, 1, 1);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// temp.initPointer(FULLY_CONNECTED);
// FCDescriptor layer2;
// layer2.initializeValues(hidden_channels1, output_channels);
// *((FCDescriptor *)temp.params) = layer2;
// layer_specifier.push_back(temp);
// // temp.initPointer(FULLY_CONNECTED);
// // FCDescriptor layer3;
// // layer3.initializeValues(hidden_channels2, output_channels);
// // *((FCDescriptor *)temp.params) = layer3;
// // layer_specifier.push_back(temp);
// temp.initPointer(SOFTMAX);
// SoftmaxDescriptor smax;
// smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE,
// output_channels, 1, 1);
// *((SoftmaxDescriptor *)(temp.params)) = smax;
// layer_specifier.push_back(temp);
// AlexNet
vector<LayerSpecifier> layer_specifier;
{
ConvDescriptor layer0;
layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer0;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer1;
layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer1;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer2;
layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer2;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer3;
layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer3;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer4;
layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer4;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer5;
layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer5;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer6;
layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer6;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer7;
layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer7;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer8;
layer8.initializeValues(9216, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer8;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer9;
layer9.initializeValues(4096, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer9;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer10;
layer10.initializeValues(4096, 1000);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer10;
layer_specifier.push_back(temp);
}
{
SoftmaxDescriptor layer11;
layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1,
1);
LayerSpecifier temp;
temp.initPointer(SOFTMAX);
*((SoftmaxDescriptor *)temp.params) = layer11;
layer_specifier.push_back(temp);
}
vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
vDNNType vdnn_type = vDNN_DYN;
string filename("vdnn_dyn");
if (argc == 3) {
filename.assign("vdnn");
// argv[1] - layers to offload, argv[2] - conv algo to use
if (strcmp(argv[1], "dyn") == 0) {
vdnn_type = vDNN_DYN;
filename.append("_dyn");
} else if (strcmp(argv[1], "conv") == 0) {
vdnn_type = vDNN_CONV;
filename.append("_conv");
} else if (strcmp(argv[1], "all") == 0) {
vdnn_type = vDNN_ALL;
filename.append("_all");
} else if (strcmp(argv[1], "alternate_conv") == 0) {
vdnn_type = vDNN_ALTERNATE_CONV;
filename.append("_alternate_conv");
} else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or
strcmp(argv[1], "alternate_conv") == 0)) {
if (strcmp(argv[2], "p") == 0) {
vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
filename.append("_p");
} else if (strcmp(argv[2], "m") == 0) {
vdnn_conv_algo = vDNN_MEMORY_OPTIMAL;
filename.append("_m");
} else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
}
}
int batch_size = 256;
long long dropout_seed = 1;
float softmax_eps = 1e-8;
float init_std_dev = 0.1;
NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW,
dropout_seed, softmax_eps, init_std_dev, vdnn_type,
vdnn_conv_algo, SGD);
int num_epoch = 1000;
double learning_rate = 1e-3;
double learning_rate_decay = 0.9;
Solver solver(&net, (void *)f_train_images, f_train_labels,
(void *)f_train_images, f_train_labels, num_epoch, SGD,
learning_rate, learning_rate_decay, num_train, num_train);
vector<float> loss;
vector<float> time;
vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag;
solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag);
printTimes(time, filename);
printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename);
}
void printTimes(vector<float> &time, string filename) {
float mean_time = 0.0;
float std_dev = 0.0;
int N = time.size();
for (int i = 0; i < N; i++) {
mean_time += time[i];
}
mean_time /= N;
for (int i = 0; i < N; i++) {
std_dev += pow(time[i] - mean_time, 2);
}
std_dev /= N;
std_dev = pow(std_dev, 0.5);
cout << "Average time: " << mean_time << endl;
cout << "Standard deviation: " << std_dev << endl;
filename.append(".dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
for (int i = 0; i < N; i++) {
f << time[i] << endl;
}
f << "mean_time: " << mean_time << endl;
f << "standard_deviation: " << std_dev << endl;
f.close();
filename.append(".bin");
fstream f_bin;
f_bin.open(filename.c_str(), ios_base::out);
f_bin.write((char *)&N, sizeof(N));
for (int i = 0; i < N; i++) {
f_bin.write((char *)&time[i], sizeof(time[i]));
}
f_bin.close();
}
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag,
vector<vector<float> > &bwd_vdnn_lag, string filename) {
filename.append("_lag.dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
int N = fwd_vdnn_lag.size();
for (int i = 0; i < N; i++) {
for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) {
f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl;
}
for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) {
f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl;
}
f << endl;
}
f.close();
} |
376b72bef5de3f2cb2b3c6b1532bafdfa5be4d7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// main.cpp
// original author: Diivanand Ramalingam
// original institution: Computational Optical Imaging Lab at UC Berkeley (Prof. Laura Waller's Lab)
// forked author: Michael tang
#include <ctime>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <iostream>
#include "tiff_io-win.h"
#include "toolbox.h"
#include "pointwise_matrix_ops.h"
#include "fourier_tools.h"
//This constant may already be in <cmath>, so I should switch to using that in the code, but for now I'm defining it here
#define PI 3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679
//Not all GPUs can handle large block sizes, which is why the block dimensions are kept at 32 for now
#define BLOCKSIZEX 32 //could try larger values (64, 128, etc.) to see potentially better performance gains!
#define BLOCKSIZEY 32 //could try larger values (64, 128, etc.) to see potentially better performance gains!
//Global time variable
double total_time_elapsed = 0.0;
//device function declaration
__global__ void xrayTIEHelperKernel(hipfftComplex *denominator_dev, float *freq_vector_dev, int N, float R2, float delta, float Mag, float mu, float reg);
//host function declarations
void stateArguments(float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale);
hipError_t calculateThickness(float *output, float *image, int height, int width, float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale);
/* Height is the number of rows (x) and Width is the number of columns (y)*/
int main(int argc, char **argv)
{
if(argc != 14){
printf("Incorrect number of arguments. Usage: ./tie input_folder output_folder prefix start_num end_num Iin Mag R2 mu delta ps reg scale\n");
quitProgramPrompt(0);
return 1;
}else {
char *srcFolder = argv[1];
char *destFolder = argv[2];
char *prefix = argv[3];
int start = atoi(argv[4]);
int end = atoi(argv[5]);
int numFiles = end - start + 1;
char **filenames = getFilenames(srcFolder, prefix, start, end);
char **outfilenames = getFilenames(destFolder, prefix, start, end);
TIFFSetWarningHandler(NULL);
//IinVal, Mag, R2, mu, delta, ps
float IinVal = atof(argv[6]);
float Mag = atof(argv[7]);
//Mag = 1.0; //Right now algorithm doesn't work for Mag other than 1.0, so for now Mag argument isn't supported.
float R2 = atof(argv[8]);
float mu = atof(argv[9]);
float delta = atof(argv[10]);
float ps = atof(argv[11]);
float reg = atof(argv[12]);
int out_scale = atoi(argv[13]);
stateArguments(IinVal, Mag, R2, mu, delta, ps, reg, out_scale);
TiffIO* tiff_io = new TiffIO();
int width;
int height;
//printf("Processing Input Files: \n");
for(int i = 0;i < numFiles; i++) {
printf("reading: %s\n", filenames[i]);
float *image;
//read image
image = tiff_io->readFloatImage(filenames[i], &width, &height);
if(!image){
printf("Error reading image\n");
}else {
//convert image to 1D for CUDA processing
//float *image1D = toFloatArray(imagerow, width, height);
int device, num_devices;
hipGetDeviceCount(&num_devices);
if (num_devices > 1) {
int max_ThreadsPerMultiProcessor = 0, max_device = 0;
for (device = 0; device < num_devices; device++) {
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, device);
if (max_ThreadsPerMultiProcessor < properties.maxThreadsPerMultiProcessor) {
max_ThreadsPerMultiProcessor = properties.maxThreadsPerMultiProcessor;
max_device = device;
}
}
hipSetDevice(max_device);
}
float *image_dev = 0;
float *output = 0;
printf("\nProcessing file %s\n", filenames[i]);
//Process Image
//Allocate space on GPU and then transfer the input image to the GPU
hipError_t cudaStatus;
cudaStatus = hipMalloc((void**)&image_dev, sizeof(float) * height * width);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for image_dev failed! Error Code: %d", cudaStatus);
}else {
cudaStatus = hipMemcpy(image_dev, image, sizeof(float) * width * height, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy for image_dev failed! Error Code: %d", cudaStatus);
}else{
output = (float *) malloc(sizeof(float) * width * height);
calculateThickness(output, image_dev, height, width, IinVal, Mag, R2, mu, delta, ps, reg, out_scale);
}
}
//End Processing of Image
//convert 1D output to 2D for outputting
float *image1DOut = 0;
float **imageOut = 0;
if(output){
//printMatrix1D("output", output, height, width);
imageOut = toFloat2D(output, width, height);
}else {
fprintf(stderr, "\n Output is NULL so using original image as output");
imageOut = toFloat2D(image, width, height);
}
//output image
printf("\nFile Processed. Outputting to %s\n", outfilenames[i]);
tiff_io->writeFloatImage(imageOut, outfilenames[i], width, height);
//free memory
//free(image1D);
free(output);
free(imageOut);
hipFree(image_dev);
free(image);
}
}
delete tiff_io;
printf("\nTotal time to process %d images: %f seconds\n", numFiles,total_time_elapsed);
quitProgramPrompt(true);
return 0;
}
}
void stateArguments(float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale)
{
std::cout << "Input Argument Values:" << std::endl;
std::cout << "IinVal: " << IinVal << std::endl;
std::cout << "Mag: " << Mag << std::endl;
std::cout << "R2: " << R2 << " mm" << std::endl;
std::cout << "mu: " << mu << " mm^-1" << std::endl;
std::cout << "delta: " << delta << std::endl;
std::cout << "ps: " << ps << " mm" << std::endl;
std::cout << "reg: " << reg << std::endl;
std::cout << "scale: " << out_scale << std::endl;
}
/*
Calculates thickness according to Paganin Phase paper algorithm: http://www.ncbi.nlm.nih.gov/pubmed/12000561
*/
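/*
A sketch of the single-material retrieval formula as this routine implements it
(reconstructed from the code below rather than quoted from the paper; "reg" is an
extra regularization term and "out_scale" an output scaling specific to this code):

  t(x,y) = -(out_scale/mu) * ln( F^-1{ (mu/IinVal) * F[ Mag^2 * I(x,y) ]
                                       / ( R2*delta*(fx^2 + fy^2)/Mag + mu + reg ) } )

where F is the 2D DFT, (fx, fy) is the frequency axis in cycles/mm built from ps,
and the denominator is exactly what xrayTIEHelperKernel fills in.
*/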
hipError_t calculateThickness(float* output, float *image_dev, int height, int width, float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale)
{
hipError_t cudaStatus = hipGetLastError();
hipfftResult cufftStatus;
hipfftHandle plan = 0;
if(height != width){
fprintf(stderr, "Only works on square matrices whose dimension is a power of two!\n", cudaStatus);
goto thickness_end;
}
//declare and initialize variables used when calling CUDA kernels
int size = height * width;
int block_size_x = BLOCKSIZEX;
int block_size_y = BLOCKSIZEY;
dim3 dimBlock(block_size_x, block_size_y);
dim3 dimGrid (height/dimBlock.x, width/dimBlock.y);
int N = width;
//Handle N not a multiple of block_size_x or block_size_y, but this shouldn't be the case since N is a power of 2
//And blocksize should always be a power of 2 for both correctness and efficiency
if (height % block_size_x !=0 ) dimGrid.x+=1;
if (width % block_size_y !=0 ) dimGrid.y+=1;
std::cout << "Calculating Thickness..." << std::endl;
std::clock_t begin = std::clock();
//Code begins here
//declare device pointers
float *int_seq_dev = 0;
float *freq_vector_dev=0;
//float *image_dev = 0;
float *output_dev = 0;
hipfftComplex *image_complex_dev = 0;
hipfftComplex *fft_output_dev = 0;
hipfftComplex *fft_shifted_output_dev = 0;
hipfftComplex *ifft_shifted_input_dev = 0; //inverse
hipfftComplex *ifft_output_dev = 0; //inverse
hipfftComplex *denominator_dev = 0;
//Start memory allocation of device vectors and convert/copy input image to complex device vector
//Allocate memory for 9 device vectors (potential speedup to be gained by reducing the number of device vectors used)
cudaStatus = hipMalloc((void**)&int_seq_dev, N * sizeof(float));
hipMemset(int_seq_dev, 0, N*sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for int_seq_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&freq_vector_dev, N * sizeof(float));
hipMemset(freq_vector_dev, 0, N*sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for freq_vector_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&output_dev, size * sizeof(float));
hipMemset(output_dev, 0,sizeof(float)*size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&image_complex_dev, size * sizeof(hipfftComplex));
hipMemset(image_complex_dev, 0,sizeof(float)*size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for image_complex_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&fft_output_dev, size * sizeof(hipfftComplex));
hipMemset(fft_output_dev, 0,sizeof(float)*size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for fft_output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&fft_shifted_output_dev, size * sizeof(hipfftComplex));
hipMemset(fft_shifted_output_dev, 0,sizeof(float)*size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for fft_shifted_output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&ifft_shifted_input_dev, size * sizeof(hipfftComplex));
hipMemset(ifft_shifted_input_dev, 0,sizeof(float)*size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for ifft_shifted_input_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&ifft_output_dev, size * sizeof(hipfftComplex));
hipMemset(ifft_output_dev, 0,sizeof(float)*size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for ifft_output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = hipMalloc((void**)&denominator_dev, size * sizeof(hipfftComplex));
hipMemset(denominator_dev, 0,sizeof(float)*size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc for denominator_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
//FINISH ALLOCATION//
//scale by magnification
//printDeviceMatrixValues("img_dev", image_dev,N,N);
hipLaunchKernelGGL(( pointwiseRealScaleRealMatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, image_dev, image_dev, Mag*Mag, N, N);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching pointwiseRealScaleMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("img_dev_scaled", image_dev,N,N);
//convert input image real device vector to complex device vector
hipLaunchKernelGGL(( real2complex), dim3(dimGrid),dim3(dimBlock), 0, 0, image_dev, image_complex_dev, N, N);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching real2complex!\n", cudaStatus);
goto thickness_end;
}
//End memory allocation of device vectors and convert/copy input image to complex device vector
//printDeviceComplexMatrixValues("img_complex_dev", image_complex_dev,N,N);
//Start creation of frequency axis
//generate integer sequence used in creating frequency axis
hipLaunchKernelGGL(( genIntSequence), dim3(N),dim3(1), 0, 0, int_seq_dev, 0, N-1);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching genIntSequence!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("int_seq_dev", int_seq_dev,1,N);
//create omega axis
hipLaunchKernelGGL(( pointwiseRealScaleRealMatrix), dim3(N),dim3(1), 0, 0, freq_vector_dev, int_seq_dev, 2*PI/N, N, 1);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("omega_freq_vector_dev", freq_vector_dev,1,N);
//Shift zero to center - for even case, pull back by pi, note N is even by our assumption of powers of 2
hipLaunchKernelGGL(( pointwiseAddRealConstantToRealMatrix), dim3(N),dim3(1), 0, 0, freq_vector_dev, freq_vector_dev, -PI, N, 1);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addReadConstantToRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//convert to cyclical frequencies (hertz) and scale by pixel size
hipLaunchKernelGGL(( pointwiseRealScaleRealMatrix), dim3(N),dim3(1), 0, 0, freq_vector_dev, freq_vector_dev, 1/(2*PI), N, 1);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
hipLaunchKernelGGL(( pointwiseRealScaleRealMatrix), dim3(N),dim3(1), 0, 0, freq_vector_dev, freq_vector_dev, 1/ps, N, 1);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("freq_vector_dev after Pi shift to center", freq_vector_dev,1,N);
//End creation of frequency axis
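//In other words, the scalings above produce f[i] = (i/N - 1/2)/ps for i = 0..N-1:
//a frequency axis in cycles per mm covering [-1/(2*ps), +1/(2*ps)), which is the
//axis consumed by xrayTIEHelperKernel below.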
//Fourier Transform image and scale according to Paganin phase algorithm
hipfftPlan2d(&plan, N, N, HIPFFT_C2C);
cufftStatus = hipfftExecC2C(plan, image_complex_dev, fft_output_dev, HIPFFT_FORWARD);
if(cufftStatus != HIPFFT_SUCCESS){
fprintf(stderr, "hipfftExecC2C returned error code %d after attempting 2D fft!\n", cufftStatus);
goto thickness_end;
}
//fft shift the spectrum of this signal
hipLaunchKernelGGL(( fftShift2D), dim3(dimGrid),dim3(dimBlock), 0, 0, fft_shifted_output_dev, fft_output_dev, width);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching fftShift2D!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("fft_shifted_output_dev", fft_shifted_output_dev, N,N);
hipLaunchKernelGGL(( pointwiseRealScaleComplexMatrix), dim3(dimGrid),dim3(dimBlock), 0, 0, fft_shifted_output_dev, fft_shifted_output_dev, mu/IinVal, N, N);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after pointwiseRealScaleComplexMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("fft_shifted_output_dev scaled", fft_shifted_output_dev, N,N);
//End Fourier Transform and scaling
//Create the denominator shown in the Paganin phase algorithm
hipLaunchKernelGGL(( xrayTIEHelperKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, denominator_dev, freq_vector_dev, N, R2, delta, Mag, mu, reg);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching xrayTIEHelperKernel!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("denominator_dev", denominator_dev, N,N);
//End creation of denominator
//pointwise divide, ifft, pointwise log, and inverse-mu-scaling as shown in Paganin phase algorithm
//pointwise divide
hipLaunchKernelGGL(( pointwiseDivideComplexMatrices), dim3(dimGrid), dim3(dimBlock), 0, 0, fft_shifted_output_dev, fft_shifted_output_dev, denominator_dev, N, N);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching xrayTIEHelperKernel!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("inv_term (as mentioned in matlab algorithm)", fft_shifted_output_dev, N,N);
//ifftshift
hipLaunchKernelGGL(( fftShift2D), dim3(dimGrid), dim3(dimBlock), 0, 0, ifft_shifted_input_dev, fft_shifted_output_dev, N);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching fftShift2D!\n", cudaStatus);
goto thickness_end;
}
//ifft
cufftStatus = hipfftExecC2C(plan, ifft_shifted_input_dev, ifft_output_dev, HIPFFT_BACKWARD);
if(cufftStatus != HIPFFT_SUCCESS){
fprintf(stderr, "hipfftExecC2C returned error code %d after attempting 2D fft!\n", cufftStatus);
goto thickness_end;
}
//normalize and convert to real device vector
float scale = 1.f / ( (float) height * (float) width );
//convert complex to real
hipLaunchKernelGGL(( complex2real_scaled), dim3(dimGrid), dim3(dimBlock), 0, 0, ifft_output_dev, output_dev, N, N, scale);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching complex2real_scaled!\n", cudaStatus);
goto thickness_end;
}
//take pointwise log and scale to obtain projected thickness!
//pointwise natural log
hipLaunchKernelGGL(( pointwiseNaturalLogRealMatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, output_dev, output_dev, N, N);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching complex2real_scaled!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("logOutput (like in matlab algorithm)", output_dev, N,N);
//pointwise real scale
hipLaunchKernelGGL(( pointwiseRealScaleRealMatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, output_dev, output_dev, -(out_scale/mu), N, N);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("output", output_dev, N,N);
//Transfer output device vector to our host output vector and we are done!
cudaStatus = hipMemcpy(output, output_dev, size * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy for output_dev failed!");
goto thickness_end;
}
//destroy cufft plan and free memory allocated on device (FYI device is another name for GPU, host is CPU)
thickness_end:
hipFree(int_seq_dev);
hipFree(freq_vector_dev);
hipFree(output_dev);
hipFree(image_complex_dev);
hipFree(fft_output_dev);
hipFree(fft_shifted_output_dev);
hipFree(ifft_shifted_input_dev);
hipFree(ifft_output_dev);
hipFree(denominator_dev);
if(plan)
hipfftDestroy(plan);
//Code ends here
std::clock_t end = std::clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
std::cout << "\nDone. Took " << elapsed_secs << " seconds" << std::endl;
total_time_elapsed += elapsed_secs;
return cudaStatus;
}
//computes the denominators seen in Paganin phase algorithm paper
__global__
void xrayTIEHelperKernel(hipfftComplex *denominator_dev, float *freq_vector_dev, int N, float R2, float delta, float Mag, float mu, float reg)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
if ( idx < N && idy < N) {
int index = idx + idy*N;
denominator_dev[index].x = ((R2*delta)*((freq_vector_dev[idx]*freq_vector_dev[idx]) + (freq_vector_dev[idy]*freq_vector_dev[idy]))/Mag) + mu + reg;
denominator_dev[index].y = 0;
}
}
| 376b72bef5de3f2cb2b3c6b1532bafdfa5be4d7a.cu | // main.cpp
// original author: Diivanand Ramalingam
// original institution: Computational Optical Imaging Lab at UC Berkeley (Prof. Laura Waller's Lab)
// forked author: Michael tang
#include <ctime>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <iostream>
#include "tiff_io-win.h"
#include "toolbox.h"
#include "pointwise_matrix_ops.h"
#include "fourier_tools.h"
//This constant may already be in <cmath>, so I should switch to using that definition in the code, but for now I'm using this one
#define PI 3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679
//Not all GPUs can handle large block sizes, which is why it's kept at 32 for now
#define BLOCKSIZEX 32 //could increase this to 64, 128, etc. to see potentially better performance gains!
#define BLOCKSIZEY 32 //could increase this to 64, 128, etc. to see potentially better performance gains!
//Global time variable
double total_time_elapsed = 0.0;
//device function declaration
__global__ void xrayTIEHelperKernel(cufftComplex *denominator_dev, float *freq_vector_dev, int N, float R2, float delta, float Mag, float mu, float reg);
//host function declarations
void stateArguments(float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale);
cudaError_t calculateThickness(float *output, float *image, int height, int width, float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale);
/* Height is the number of rows (x) and Width is the number of columns (y)*/
int main(int argc, char **argv)
{
if(argc != 14){
printf("Incorrect number of arguments. Usage: ./tie input_folder output_folder prefix start_num end_num Iin Mag R2 mu delta ps reg scale\n");
quitProgramPrompt(0);
return 1;
}else {
char *srcFolder = argv[1];
char *destFolder = argv[2];
char *prefix = argv[3];
int start = atoi(argv[4]);
int end = atoi(argv[5]);
int numFiles = end - start + 1;
char **filenames = getFilenames(srcFolder, prefix, start, end);
char **outfilenames = getFilenames(destFolder, prefix, start, end);
TIFFSetWarningHandler(NULL);
//IinVal, Mag, R2, mu, delta, ps
float IinVal = atof(argv[6]);
float Mag = atof(argv[7]);
//Mag = 1.0; //Right now the algorithm doesn't work for Mag other than 1.0, so for now the Mag argument isn't supported.
float R2 = atof(argv[8]);
float mu = atof(argv[9]);
float delta = atof(argv[10]);
float ps = atof(argv[11]);
float reg = atof(argv[12]);
int out_scale = atoi(argv[13]);
stateArguments(IinVal, Mag, R2, mu, delta, ps, reg, out_scale);
TiffIO* tiff_io = new TiffIO();
int width;
int height;
//printf("Processing Input Files: \n");
for(int i = 0;i < numFiles; i++) {
printf("reading: %s\n", filenames[i]);
float *image;
//read image
image = tiff_io->readFloatImage(filenames[i], &width, &height);
if(!image){
printf("Error reading image\n");
}else {
//convert image to 1D for CUDA processing
//float *image1D = toFloatArray(imagerow, width, height);
int device, num_devices;
cudaGetDeviceCount(&num_devices);
if (num_devices > 1) {
int max_ThreadsPerMultiProcessor = 0, max_device = 0;
for (device = 0; device < num_devices; device++) {
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, device);
if (max_ThreadsPerMultiProcessor < properties.maxThreadsPerMultiProcessor) {
max_ThreadsPerMultiProcessor = properties.maxThreadsPerMultiProcessor;
max_device = device;
}
}
cudaSetDevice(max_device);
}
float *image_dev = 0;
float *output = 0;
printf("\nProcessing file %s\n", filenames[i]);
//Process Image
//Allocate space on GPU and then transfer the input image to the GPU
cudaError_t cudaStatus;
cudaStatus = cudaMalloc((void**)&image_dev, sizeof(float) * height * width);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for image_dev failed! Error Code: %d", cudaStatus);
}else {
cudaStatus = cudaMemcpy(image_dev, image, sizeof(float) * width * height, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy for image_dev failed! Error Code: %d", cudaStatus);
}else{
output = (float *) malloc(sizeof(float) * width * height);
calculateThickness(output, image_dev, height, width, IinVal, Mag, R2, mu, delta, ps, reg, out_scale);
}
}
//End Processing of Image
//convert 1D output to 2D for outputting
float *image1DOut = 0;
float **imageOut = 0;
if(output){
//printMatrix1D("output", output, height, width);
imageOut = toFloat2D(output, width, height);
}else {
fprintf(stderr, "\n Output is NULL so using original image as output");
imageOut = toFloat2D(image, width, height);
}
//output image
printf("\nFile Processed. Outputting to %s\n", outfilenames[i]);
tiff_io->writeFloatImage(imageOut, outfilenames[i], width, height);
//free memory
//free(image1D);
free(output);
free(imageOut);
cudaFree(image_dev);
free(image);
}
}
delete tiff_io;
printf("\nTotal time to process %d images: %f seconds\n", numFiles,total_time_elapsed);
quitProgramPrompt(true);
return 0;
}
}
void stateArguments(float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale)
{
std::cout << "Input Argument Values:" << std::endl;
std::cout << "IinVal: " << IinVal << std::endl;
std::cout << "Mag: " << Mag << std::endl;
std::cout << "R2: " << R2 << " mm" << std::endl;
std::cout << "mu: " << mu << " mm^-1" << std::endl;
std::cout << "delta: " << delta << std::endl;
std::cout << "ps: " << ps << " mm" << std::endl;
std::cout << "reg: " << reg << std::endl;
std::cout << "scale: " << out_scale << std::endl;
}
/*
Calculates thickness according to Paganin Phase paper algorithm: http://www.ncbi.nlm.nih.gov/pubmed/12000561
*/
cudaError_t calculateThickness(float* output, float *image_dev, int height, int width, float IinVal, float Mag, float R2, float mu, float delta, float ps, float reg, int out_scale)
{
cudaError_t cudaStatus = cudaGetLastError();
cufftResult cufftStatus;
cufftHandle plan = 0;
if(height != width){
fprintf(stderr, "Only works on square matrices whose dimension is a power of two!\n", cudaStatus);
goto thickness_end;
}
//declare and initialize variables used when calling CUDA kernels
int size = height * width;
int block_size_x = BLOCKSIZEX;
int block_size_y = BLOCKSIZEY;
dim3 dimBlock(block_size_x, block_size_y);
dim3 dimGrid (height/dimBlock.x, width/dimBlock.y);
int N = width;
//Handle N not a multiple of block_size_x or block_size_y, but this shouldn't be the case since N is a power of 2
//And blocksize should always be a power of 2 for both correctness and efficiency
if (height % block_size_x !=0 ) dimGrid.x+=1;
if (width % block_size_y !=0 ) dimGrid.y+=1;
std::cout << "Calculating Thickness..." << std::endl;
std::clock_t begin = std::clock();
//Code begins here
//declare device pointers
float *int_seq_dev = 0;
float *freq_vector_dev=0;
//float *image_dev = 0;
float *output_dev = 0;
cufftComplex *image_complex_dev = 0;
cufftComplex *fft_output_dev = 0;
cufftComplex *fft_shifted_output_dev = 0;
cufftComplex *ifft_shifted_input_dev = 0; //inverse
cufftComplex *ifft_output_dev = 0; //inverse
cufftComplex *denominator_dev = 0;
//Start memory allocation of device vectors and convert/copy input image to complex device vector
//Allocate memory for 9 device vectors (potential speedup to be gained by reducing the number of device vectors used)
cudaStatus = cudaMalloc((void**)&int_seq_dev, N * sizeof(float));
cudaMemset(int_seq_dev, 0, N*sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for int_seq_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&freq_vector_dev, N * sizeof(float));
cudaMemset(freq_vector_dev, 0, N*sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for freq_vector_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&output_dev, size * sizeof(float));
cudaMemset(output_dev, 0,sizeof(float)*size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&image_complex_dev, size * sizeof(cufftComplex));
cudaMemset(image_complex_dev, 0,sizeof(float)*size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for image_complex_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&fft_output_dev, size * sizeof(cufftComplex));
cudaMemset(fft_output_dev, 0,sizeof(float)*size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for fft_output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&fft_shifted_output_dev, size * sizeof(cufftComplex));
cudaMemset(fft_shifted_output_dev, 0,sizeof(float)*size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for fft_shifted_output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&ifft_shifted_input_dev, size * sizeof(cufftComplex));
cudaMemset(ifft_shifted_input_dev, 0,sizeof(float)*size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for ifft_shifted_input_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&ifft_output_dev, size * sizeof(cufftComplex));
cudaMemset(ifft_output_dev, 0,sizeof(float)*size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for ifft_output_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
cudaStatus = cudaMalloc((void**)&denominator_dev, size * sizeof(cufftComplex));
cudaMemset(denominator_dev, 0,sizeof(float)*size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc for denominator_dev failed! Error Code: %d", cudaStatus);
goto thickness_end;
}
//FINISH ALLOCATION//
//scale by magnification
//printDeviceMatrixValues("img_dev", image_dev,N,N);
pointwiseRealScaleRealMatrix<<<dimGrid, dimBlock>>>(image_dev, image_dev, Mag*Mag, N, N);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching pointwiseRealScaleMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("img_dev_scaled", image_dev,N,N);
//convert input image real device vector to complex device vector
real2complex<<<dimGrid,dimBlock>>>(image_dev, image_complex_dev, N, N);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching real2complex!\n", cudaStatus);
goto thickness_end;
}
//End memory allocation of device vectors and convert/copy input image to complex device vector
//printDeviceComplexMatrixValues("img_complex_dev", image_complex_dev,N,N);
//Start creation of frequency axis
//generate integer sequence used in creating frequency axis
genIntSequence<<<N,1>>>(int_seq_dev, 0, N-1);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching genIntSequence!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("int_seq_dev", int_seq_dev,1,N);
//create omega axis
pointwiseRealScaleRealMatrix<<<N,1>>>(freq_vector_dev, int_seq_dev, 2*PI/N, N, 1);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("omega_freq_vector_dev", freq_vector_dev,1,N);
//Shift zero to center - for even case, pull back by pi, note N is even by our assumption of powers of 2
pointwiseAddRealConstantToRealMatrix<<<N,1>>>(freq_vector_dev, freq_vector_dev, -PI, N, 1);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addReadConstantToRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//convert to cyclical frequencies (hertz) and scale by pixel size
pointwiseRealScaleRealMatrix<<<N,1>>>(freq_vector_dev, freq_vector_dev, 1/(2*PI), N, 1);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
pointwiseRealScaleRealMatrix<<<N,1>>>(freq_vector_dev, freq_vector_dev, 1/ps, N, 1);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("freq_vector_dev after Pi shift to center", freq_vector_dev,1,N);
//End creation of frequency axis
//Fourier Transform image and scale according to Paganin phase algorithm
cufftPlan2d(&plan, N, N, CUFFT_C2C);
cufftStatus = cufftExecC2C(plan, image_complex_dev, fft_output_dev, CUFFT_FORWARD);
if(cufftStatus != CUFFT_SUCCESS){
fprintf(stderr, "cufftExecC2C returned error code %d after attempting 2D fft!\n", cufftStatus);
goto thickness_end;
}
//fft shift the spectrum of this signal
fftShift2D<<<dimGrid,dimBlock>>>(fft_shifted_output_dev, fft_output_dev, width);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching fftShift2D!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("fft_shifted_output_dev", fft_shifted_output_dev, N,N);
pointwiseRealScaleComplexMatrix<<<dimGrid,dimBlock>>>(fft_shifted_output_dev, fft_shifted_output_dev, mu/IinVal, N, N);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after pointwiseRealScaleComplexMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("fft_shifted_output_dev scaled", fft_shifted_output_dev, N,N);
//End Fourier Transform and scaling
//Create the denominator shown in the Paganin phase algorithm
xrayTIEHelperKernel<<<dimGrid, dimBlock>>>(denominator_dev, freq_vector_dev, N, R2, delta, Mag, mu, reg);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching xrayTIEHelperKernel!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("denominator_dev", denominator_dev, N,N);
//End creation of denominator
//pointwise divide, ifft, pointwise log, and inverse-mu-scaling as shown in Paganin phase algorithm
//pointwise divide
pointwiseDivideComplexMatrices<<<dimGrid, dimBlock>>>(fft_shifted_output_dev, fft_shifted_output_dev, denominator_dev, N, N);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching xrayTIEHelperKernel!\n", cudaStatus);
goto thickness_end;
}
//printDeviceComplexMatrixValues("inv_term (as mentioned in matlab algorithm)", fft_shifted_output_dev, N,N);
//ifftshift
fftShift2D<<<dimGrid, dimBlock>>>(ifft_shifted_input_dev, fft_shifted_output_dev, N);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching fftShift2D!\n", cudaStatus);
goto thickness_end;
}
//ifft
cufftStatus = cufftExecC2C(plan, ifft_shifted_input_dev, ifft_output_dev, CUFFT_INVERSE);
if(cufftStatus != CUFFT_SUCCESS){
fprintf(stderr, "cufftExecC2C returned error code %d after attempting 2D fft!\n", cufftStatus);
goto thickness_end;
}
//normalize and convert to real device vector
float scale = 1.f / ( (float) height * (float) width );
//convert complex to real
complex2real_scaled<<<dimGrid, dimBlock>>>(ifft_output_dev, output_dev, N, N, scale);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching complex2real_scaled!\n", cudaStatus);
goto thickness_end;
}
//take pointwise log and scale to obtain projected thickness!
//pointwise natural log
pointwiseNaturalLogRealMatrix<<<dimGrid, dimBlock>>>(output_dev, output_dev, N, N);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching complex2real_scaled!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("logOutput (like in matlab algorithm)", output_dev, N,N);
//pointwise real scale
pointwiseRealScaleRealMatrix<<<dimGrid, dimBlock>>>(output_dev, output_dev, -(out_scale/mu), N, N);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching pointwiseRealScaleRealMatrix!\n", cudaStatus);
goto thickness_end;
}
//printDeviceMatrixValues("output", output_dev, N,N);
//Transfer output device vector to our host output vector and we are done!
cudaStatus = cudaMemcpy(output, output_dev, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy for output_dev failed!");
goto thickness_end;
}
//destroy cufft plan and free memory allocated on device (FYI device is another name for GPU, host is CPU)
thickness_end:
cudaFree(int_seq_dev);
cudaFree(freq_vector_dev);
cudaFree(output_dev);
cudaFree(image_complex_dev);
cudaFree(fft_output_dev);
cudaFree(fft_shifted_output_dev);
cudaFree(ifft_shifted_input_dev);
cudaFree(ifft_output_dev);
cudaFree(denominator_dev);
if(plan)
cufftDestroy(plan);
//Code ends here
std::clock_t end = std::clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
std::cout << "\nDone. Took " << elapsed_secs << " seconds" << std::endl;
total_time_elapsed += elapsed_secs;
return cudaStatus;
}
//computes the denominators seen in Paganin phase algorithm paper
__global__
void xrayTIEHelperKernel(cufftComplex *denominator_dev, float *freq_vector_dev, int N, float R2, float delta, float Mag, float mu, float reg)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
if ( idx < N && idy < N) {
int index = idx + idy*N;
denominator_dev[index].x = ((R2*delta)*((freq_vector_dev[idx]*freq_vector_dev[idx]) + (freq_vector_dev[idy]*freq_vector_dev[idy]))/Mag) + mu + reg;
denominator_dev[index].y = 0;
}
}
|
4675adde9a258088a187ae9f5ff7a831dd81423c.hip | // !!! This is a file automatically generated by hipify!!!
#include "test_help.cuh"
#include "../DeviceBuffer.h"
#include <hip/hip_runtime.h>
//__global__
int main(void)
{
#if 1
_CrtSetDbgFlag (_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
_CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
_CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
#endif
//create nutty
nutty::Init();
size_t freeMemory, totalMemory;
hipMemGetInfo(&freeMemory, &totalMemory);
printf("%u %u\n", totalMemory, freeMemory);
nutty::DeviceBuffer<char> memory0(1024 * 1024 * 2);
nutty::DeviceBuffer<char> memory1(1024 * 1024 * 1024);
nutty::DeviceBuffer<char> memory2(1024 * 1024 * 1024);
nutty::Fill(memory0.Begin(), memory0.End(), (char)0);
nutty::Fill(memory1.Begin(), memory1.End(), (char)0);
//release nutty
nutty::Release();
return 0;
} | 4675adde9a258088a187ae9f5ff7a831dd81423c.cu | #include "test_help.cuh"
#include "../DeviceBuffer.h"
#include <cuda_runtime.h>
//__global__
int main(void)
{
#if 1
_CrtSetDbgFlag (_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
_CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
_CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
#endif
//create nutty
nutty::Init();
size_t freeMemory, totalMemory;
cudaMemGetInfo(&freeMemory, &totalMemory);
printf("%u %u\n", totalMemory, freeMemory);
nutty::DeviceBuffer<char> memory0(1024 * 1024 * 2);
nutty::DeviceBuffer<char> memory1(1024 * 1024 * 1024);
nutty::DeviceBuffer<char> memory2(1024 * 1024 * 1024);
nutty::Fill(memory0.Begin(), memory0.End(), (char)0);
nutty::Fill(memory1.Begin(), memory1.End(), (char)0);
//release nutty
nutty::Release();
return 0;
} |
aa21a92c7c08fc5aad151194595d075cd8c3478d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduce_max_filter_finalf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *g_idata = NULL;
hipMalloc(&g_idata, XSIZE*YSIZE);
float *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
int *max_idx = NULL;
hipMalloc(&max_idx, XSIZE*YSIZE);
unsigned int n = 1;
unsigned int width = 1;
int blockSize = XSIZE*YSIZE;
int *maxes = NULL;
hipMalloc(&maxes, XSIZE*YSIZE);
int nMax = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
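// The two while-loops above simply round XSIZE/YSIZE up to multiples of the block size,
// so gridBlock is equivalent to dim3((XSIZE+BLOCKX-1)/BLOCKX, (YSIZE+BLOCKY-1)/BLOCKY).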
hipFree(0);hipLaunchKernelGGL((
reduce_max_filter_finalf), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,max_idx,n,width,blockSize,maxes,nMax);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
reduce_max_filter_finalf), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,max_idx,n,width,blockSize,maxes,nMax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
reduce_max_filter_finalf), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,max_idx,n,width,blockSize,maxes,nMax);
}
auto end = steady_clock::now();
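// Note: there is no device synchronization between the last launch and this timestamp,
// so the figure below mostly reflects kernel-enqueue overhead (plus whatever kernels
// happened to finish while the loop was still enqueuing), not end-to-end execution time.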
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | aa21a92c7c08fc5aad151194595d075cd8c3478d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduce_max_filter_finalf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *g_idata = NULL;
cudaMalloc(&g_idata, XSIZE*YSIZE);
float *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
int *max_idx = NULL;
cudaMalloc(&max_idx, XSIZE*YSIZE);
unsigned int n = 1;
unsigned int width = 1;
int blockSize = XSIZE*YSIZE;
int *maxes = NULL;
cudaMalloc(&maxes, XSIZE*YSIZE);
int nMax = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduce_max_filter_finalf<<<gridBlock,threadBlock>>>(g_idata,g_odata,max_idx,n,width,blockSize,maxes,nMax);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduce_max_filter_finalf<<<gridBlock,threadBlock>>>(g_idata,g_odata,max_idx,n,width,blockSize,maxes,nMax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduce_max_filter_finalf<<<gridBlock,threadBlock>>>(g_idata,g_odata,max_idx,n,width,blockSize,maxes,nMax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ee3eee6626365993eb065cae6c41f71dcaa67f67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/UpSample.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#endif
namespace at::native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
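/*
Both kernels in this file evaluate a separable 4x4 cubic-convolution stencil. The forward
pass above computes out(y, x) = sum_{i,j} w_y[i] * w_x[j] * in(in_y - 1 + i, in_x - 1 + j),
with weights coming from cubic_interp1d / get_cubic_upsampling_coefficients (defined in
UpSample.cuh, not shown here; ATen's cubic kernel is the Keys filter with a = -0.75).
The backward pass below scatters grad_out * w_y[i] * w_x[j] back onto the same 4x4 input
window through upsample_increment_value_bounded, whose atomicAdd accumulation is why the
backward entry point at the bottom of this file alerts about nondeterminism.
*/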
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// special case: input and output are the same size, just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
static void upsample_bicubic2d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int input_height = input.size(2);
int input_width = input.size(3);
output.zero_();
const int num_output_elements = output_height * output_width;
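// One thread is launched per output pixel of a single (output_height x output_width) plane;
// the batch and channel loops run inside the kernel, so the grid size is independent of N and C.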
const int max_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_out_frame<scalar_t, accscalar_t>)
, dim3(ceil_div(num_output_elements, max_threads)),
dim3(max_threads),
0,
stream,
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
static void upsample_bicubic2d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
Tensor grad_output = grad_output_.contiguous();
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(ceil_div(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_bicubic2d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& output) {
upsample_bicubic2d_out_cuda_template(output, input, output_size, align_corners, scales_h, scales_w);
}
TORCH_IMPL_FUNC(upsample_bicubic2d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& grad_input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda");
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
} // namespace at::native
| ee3eee6626365993eb065cae6c41f71dcaa67f67.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/UpSample.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#endif
namespace at::native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
// Special case: input and output are the same size, just copy
const int output_x = index % output_width;
const int output_y = index / output_width;
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
const scalar_t val = idata[n][c][output_y][output_x];
odata[n][c][output_y][output_x] = val;
}
}
return;
}
// Interpolation kernel
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int in_x = floorf(real_x);
accscalar_t t_x = real_x - in_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int in_y = floorf(real_y);
accscalar_t t_y = real_y - in_y;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; c++) {
accscalar_t coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d(
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1),
upsample_get_value_bounded<scalar_t>(
idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2),
t_x);
}
odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
t_y));
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bicubic2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// special case: input and output are the same size, just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
accscalar_t real_x = area_pixel_compute_source_index(
width_scale, output_x, align_corners, /*cubic=*/true);
int input_x = floorf(real_x);
accscalar_t t_x = real_x - input_x;
accscalar_t real_y = area_pixel_compute_source_index(
height_scale, output_y, align_corners, /*cubic=*/true);
int input_y = floorf(real_y);
accscalar_t t_y = real_y - input_y;
accscalar_t x_coeffs[4];
accscalar_t y_coeffs[4];
get_cubic_upsampling_coefficients(x_coeffs, t_x);
get_cubic_upsampling_coefficients(y_coeffs, t_y);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
input_y - 1 + i,
input_x - 1 + j,
out_value * y_coeffs[i] * x_coeffs[j]);
}
}
}
}
}
static void upsample_bicubic2d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int input_height = input.size(2);
int input_width = input.size(3);
output.zero_();
const int num_output_elements = output_height * output_width;
const int max_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
// Launch kernel
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
input.scalar_type(), "upsample_bicubic2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
// Get scaling factors
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_out_frame<scalar_t, accscalar_t>
<<<ceil_div(num_output_elements, max_threads),
max_threads,
0,
stream>>>(
num_output_elements,
rheight,
rwidth,
align_corners,
idata,
odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
static void upsample_bicubic2d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
Tensor grad_output = grad_output_.contiguous();
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>
<<<ceil_div(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_bicubic2d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& output) {
upsample_bicubic2d_out_cuda_template(output, input, output_size, align_corners, scales_h, scales_w);
}
TORCH_IMPL_FUNC(upsample_bicubic2d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
const Tensor& grad_input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda");
upsample_bicubic2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
} // namespace at::native
|
340019207572b6a5137acc7909ee3c56b2e3f9e2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file: multiplyOut.cu
* \brief: Multiply out in parallel with cublas.
* \author: Hugh Delaney
* \version:
* \date: 2021-09-16
*/
#include "cu_multiplyOut.h"
#include "multiplyOut.h"
#include "helpers.h"
#include <iomanip>
template <typename T>
void cu_multOut(lanczosDecomp<T> &L, eigenDecomp<T> &E, adjMatrix &A)
{
auto n{L.get_n()}, k{L.get_krylov()};
// Applying function
for (auto j = 0u; j < L.krylov_dim; j++)
my_exp_func(E.eigenvalues[j]);
// Elementwise multiplying of f(lambda) by first row of eigenvectors
for (auto j = 0u; j < L.krylov_dim; j++)
E.eigenvalues[j] *= L.x_norm * E.eigenvectors[j];
//print_matrix(3, 1, &E.eigenvalues[0]);
T *eigvals_d, *ans_d, alpha {1.0}, beta {0.0};
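// Host-side GEMV: fold the scaled eigenvalues through the eigenvector matrix so that L.ans
// holds the k Krylov-space coefficients; the device GEMV below multiplies Q by these
// coefficients (note that eigvals_d is loaded from L.ans, not from E.eigenvalues).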
cblas_dgemv(CblasRowMajor, CblasNoTrans, L.krylov_dim, L.krylov_dim, 1, &E.eigenvectors[0], k, &E.eigenvalues[0], 1, 0, &L.ans[0],1);
hipblasStatus_t status;
hipError_t cudaStat;
hipblasHandle_t handle;
hipStream_t stream[2];
hipStreamCreate(&stream[0]);
hipStreamCreate(&stream[1]);
cudaStat = hipMalloc(&eigvals_d, sizeof(T) * k);
if (cudaStat != hipSuccess) {
std::cerr << "Allocation error for eigvals_d.\n";
return;
}
cudaStat = hipMalloc(&ans_d, sizeof(T) * n);
if (cudaStat != hipSuccess) {
std::cerr << "Allocation error for ans_d.\n";
return;
}
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "Cublas initialization error.\n";
return;
}
// Memory transfers
status = hipblasSetVectorAsync(k, sizeof(T),&L.ans[0], 1, eigvals_d,1, stream[0]);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "Device access error.\n";
return;
}
// DGEMV
status = hipblasDgemv(handle,HIPBLAS_OP_N,
n,k,
&alpha,
L.Q_d,n,
eigvals_d,1,
&beta,ans_d,1);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "Dgemv error.\n";
return;
}
//printf("\nSome values from ans_d: ");
//print_some<<<1,1>>>(ans_d);
status = hipblasGetVector(n, sizeof(T),ans_d, 1,&L.ans[0],1);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "Error transferring from device to host.\n";
return;
}
hipStreamDestroy(stream[0]);
hipStreamDestroy(stream[1]);
hipblasDestroy(handle);
hipFree(L.Q_d);
hipFree(eigvals_d);
hipFree(ans_d);
}
template void cu_multOut(lanczosDecomp<float> &L, eigenDecomp<float> &E, adjMatrix &A);
template void cu_multOut(lanczosDecomp<double> &L, eigenDecomp<double> &E, adjMatrix &A);
| 340019207572b6a5137acc7909ee3c56b2e3f9e2.cu | /**
* \file: multiplyOut.cu
* \brief: Multiply out in parallel with cublas.
* \author: Hugh Delaney
* \version:
* \date: 2021-09-16
*/
#include "cu_multiplyOut.h"
#include "multiplyOut.h"
#include "helpers.h"
#include <iomanip>
template <typename T>
void cu_multOut(lanczosDecomp<T> &L, eigenDecomp<T> &E, adjMatrix &A)
{
auto n{L.get_n()}, k{L.get_krylov()};
// Applying function
for (auto j = 0u; j < L.krylov_dim; j++)
my_exp_func(E.eigenvalues[j]);
// Elementwise multiplying of f(lambda) by first row of eigenvectors
for (auto j = 0u; j < L.krylov_dim; j++)
E.eigenvalues[j] *= L.x_norm * E.eigenvectors[j];
//print_matrix(3, 1, &E.eigenvalues[0]);
T *eigvals_d, *ans_d, alpha {1.0}, beta {0.0};
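// Host-side GEMV: fold the scaled eigenvalues through the eigenvector matrix so that L.ans
// holds the k Krylov-space coefficients; the device GEMV below multiplies Q by these
// coefficients (note that eigvals_d is loaded from L.ans, not from E.eigenvalues).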
cblas_dgemv(CblasRowMajor, CblasNoTrans, L.krylov_dim, L.krylov_dim, 1, &E.eigenvectors[0], k, &E.eigenvalues[0], 1, 0, &L.ans[0],1);
cublasStatus_t status;
cudaError_t cudaStat;
cublasHandle_t handle;
cudaStream_t stream[2];
cudaStreamCreate(&stream[0]);
cudaStreamCreate(&stream[1]);
cudaStat = cudaMalloc(&eigvals_d, sizeof(T) * k);
if (cudaStat != cudaSuccess) {
std::cerr << "Allocation error for eigvals_d.\n";
return;
}
cudaStat = cudaMalloc(&ans_d, sizeof(T) * n);
if (cudaStat != cudaSuccess) {
std::cerr << "Allocation error for ans_d.\n";
return;
}
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "Cublas initialization error.\n";
return;
}
// Memory transfers
status = cublasSetVectorAsync(k, sizeof(T),&L.ans[0], 1, eigvals_d,1, stream[0]);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "Device access error.\n";
return;
}
// DGEMV
status = cublasDgemv_v2(handle,CUBLAS_OP_N,
n,k,
&alpha,
L.Q_d,n,
eigvals_d,1,
&beta,ans_d,1);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "Dgemv error.\n";
return;
}
//printf("\nSome values from ans_d: ");
//print_some<<<1,1>>>(ans_d);
status = cublasGetVector(n, sizeof(T),ans_d, 1,&L.ans[0],1);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "Error transferring from device to host.\n";
return;
}
cudaStreamDestroy(stream[0]);
cudaStreamDestroy(stream[1]);
cublasDestroy(handle);
cudaFree(L.Q_d);
cudaFree(eigvals_d);
cudaFree(ans_d);
}
template void cu_multOut(lanczosDecomp<float> &L, eigenDecomp<float> &E, adjMatrix &A);
template void cu_multOut(lanczosDecomp<double> &L, eigenDecomp<double> &E, adjMatrix &A);
|
0674c214204c2a6f246bcbc30df498138534fa46.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void toggleCase(char *ipStr, char *opStr)
{
int i = threadIdx.x;
if(ipStr[i] >= 'a' && ipStr[i] <= 'z')
{
opStr[i] = ipStr[i] - 'a' + 'A';
}
else if(ipStr[i] >= 'A' && ipStr[i] <= 'Z')
{
opStr[i] = ipStr[i] - 'A' + 'a';
}
else
{
opStr[i] = ipStr[i];
}
}
int main()
{
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
char *str = (char *) calloc(BUFSIZ, sizeof(char)), *dStr, *dOpStr;
printf("Enter a string\n");
scanf("%[^\n]%*c", str);
int len = strlen(str);
hipEventRecord(startEvent, 0);
hipMalloc(&dStr, sizeof(char) * len);
hipMalloc(&dOpStr, sizeof(char) * len);
hipMemcpy(dStr, str, sizeof(char) * len, hipMemcpyHostToDevice);
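// One thread per character in a single block: inputs longer than the device's maximum
// block size (typically 1024 threads) would need a multi-block launch instead.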
hipLaunchKernelGGL(( toggleCase), dim3(1), dim3(len), 0, 0, dStr, dOpStr);
hipMemcpy(str, dOpStr, sizeof(char) * len, hipMemcpyDeviceToHost);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
float timeElapsed;
hipEventElapsedTime(&timeElapsed, startEvent, stopEvent);
printf("The resultant string: \n");
printf("%s\n", str);
printf("Time taken for CUDA operations %0.5fms\n", timeElapsed);
hipFree(dStr);
hipFree(dOpStr);
}
| 0674c214204c2a6f246bcbc30df498138534fa46.cu | #include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void toggleCase(char *ipStr, char *opStr)
{
int i = threadIdx.x;
if(ipStr[i] >= 'a' && ipStr[i] <= 'z')
{
opStr[i] = ipStr[i] - 'a' + 'A';
}
else if(ipStr[i] >= 'A' && ipStr[i] <= 'Z')
{
opStr[i] = ipStr[i] - 'A' + 'a';
}
else
{
opStr[i] = ipStr[i];
}
}
int main()
{
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
char *str = (char *) calloc(BUFSIZ, sizeof(char)), *dStr, *dOpStr;
printf("Enter a string\n");
scanf("%[^\n]%*c", str);
int len = strlen(str);
cudaEventRecord(startEvent, 0);
cudaMalloc(&dStr, sizeof(char) * len);
cudaMalloc(&dOpStr, sizeof(char) * len);
cudaMemcpy(dStr, str, sizeof(char) * len, cudaMemcpyHostToDevice);
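// One thread per character in a single block: inputs longer than the device's maximum
// block size (typically 1024 threads) would need a multi-block launch instead.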
toggleCase<<<1, len>>>(dStr, dOpStr);
cudaMemcpy(str, dOpStr, sizeof(char) * len, cudaMemcpyDeviceToHost);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
float timeElapsed;
cudaEventElapsedTime(&timeElapsed, startEvent, stopEvent);
printf("The resultant string: \n");
printf("%s\n", str);
printf("Time taken for CUDA operations %0.5fms\n", timeElapsed);
cudaFree(dStr);
cudaFree(dOpStr);
}
|
15227a3da1d8da771e9cd811ef43db2d13ee2770.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "maxpool_layer.h"
#include "hip/hip_runtime.h"
}
#define MAX2(a,b) ((a)>(b)?(a):(b))
#define MAX4(a,b,c,d) MAX2(MAX2(a,b),MAX2(c,d))
// custom kernel for stride-2, 2x2 window max pooling
__global__ void forward_maxpool_2x2_s2_kernel(int n, int in_h, int in_w, int in_c, int stridex, int stridey, int sizex, int sizey, int padx, int pady, float *input, float *output, int *indexes, int yolotype, int is_leaky, float slope)
{
int h = (in_h + 2*pady - sizey)/stridey + 1;
int w = (in_w + 2*padx - sizex)/stridex + 1;
if (yolotype)
{
h = (in_h + 2*pady)/stridey;
w = (in_w + 2*padx)/stridex;
}
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -padx;
int h_offset = -pady;
int out_index = j + w*(i + h*(k + c*b));
float *pSrc= input + 2*j + in_w*(2*i+in_h*(k+b*in_c));
float a = MAX4(pSrc[0], pSrc[1], pSrc[in_w], pSrc[in_w+1]);
float *pOut = output + j + w*(i + h*(k + c*b));
if (!is_leaky)
pOut[0] = a;
else
pOut[0] = ((a > 0)?a:(slope*a));
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stridex, int stridey, int sizex, int sizey, int padx, int pady, float *input, float *output, int *indexes, int yolotype, int is_leaky, float slope)
{
int h = (in_h + 2*pady - sizey)/stridey + 1;
int w = (in_w + 2*padx - sizex)/stridex + 1;
if (yolotype)
{
h = (in_h + 2*pady)/stridey;
w = (in_w + 2*padx)/stridex;
}
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -padx;
int h_offset = -pady;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < sizey; ++l){
for(m = 0; m < sizex; ++m){
int cur_h = h_offset + i*stridey + l;
int cur_w = w_offset + j*stridex + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
if (is_leaky){ max = (max > 0)? max:(slope*max);}
output[out_index] = max;
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stridex, int stridey, int sizex, int sizey, int padx, int pady, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pady - sizey)/stridey + 1;
int w = (in_w + padx - sizex)/stridex + 1;
int c = in_c;
int areay = (sizey-1)/stridey;
int areax = (sizex-1)/stridex;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -padx;
int h_offset = -pady;
float d = 0;
int l, m;
for(l = -areay; l < areay+1; ++l){
for(m = -areax; m < areax+1; ++m){
int out_w = (j-w_offset)/stridex + m;
int out_h = (i-h_offset)/stridey + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
#define MAXPOOLOPT
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
printf("output h = %d :: w = %d :: c = %d, leaky = %d\n", h,w,c, layer.activation == LEAKY);
size_t n = h*w*c*layer.batch;
// special case, even width and height, 2x2 kernel, and no padding. we can split input into 2x2 tiles
// valid for most initial layers of tiny yolo or yolo.
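// In that case every output pixel reduces a disjoint 2x2 input tile, so the specialized kernel
// reads its four inputs directly without bounds checks; unlike the general kernel it does not
// record argmax indexes.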
#ifdef MAXPOOLOPT
if ((layer.padx == 0)&&(layer.pady==0)&&(layer.h%2==0)&&(layer.w%2==0)&&(layer.sizex == 2)&&(layer.sizey==2)&&(layer.stridex==2)&&(layer.stridey==2))
{
hipLaunchKernelGGL(( forward_maxpool_2x2_s2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stridex, layer.stridey, layer.sizex, layer.sizey, layer.padx, layer.pady, net.input_gpu, layer.output_gpu, layer.indexes_gpu, layer.yolotype, layer.activation==LEAKY, 0.1);
}
else
#endif
{
hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stridex, layer.stridey, layer.sizex, layer.sizey, layer.padx, layer.pady, net.input_gpu, layer.output_gpu, layer.indexes_gpu, layer.yolotype, layer.activation==LEAKY, 0.1);
}
check_error(hipPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stridex, layer.stridey, layer.sizex, layer.sizey, layer.padx, layer.pady, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
| 15227a3da1d8da771e9cd811ef43db2d13ee2770.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "maxpool_layer.h"
#include "cuda.h"
}
#define MAX2(a,b) ((a)>(b)?(a):(b))
#define MAX4(a,b,c,d) MAX2(MAX2(a,b),MAX2(c,d))
// custom kernel for stride-2, 2x2 window max pooling
__global__ void forward_maxpool_2x2_s2_kernel(int n, int in_h, int in_w, int in_c, int stridex, int stridey, int sizex, int sizey, int padx, int pady, float *input, float *output, int *indexes, int yolotype, int is_leaky, float slope)
{
int h = (in_h + 2*pady - sizey)/stridey + 1;
int w = (in_w + 2*padx - sizex)/stridex + 1;
if (yolotype)
{
h = (in_h + 2*pady)/stridey;
w = (in_w + 2*padx)/stridex;
}
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -padx;
int h_offset = -pady;
int out_index = j + w*(i + h*(k + c*b));
float *pSrc= input + 2*j + in_w*(2*i+in_h*(k+b*in_c));
float a = MAX4(pSrc[0], pSrc[1], pSrc[in_w], pSrc[in_w+1]);
float *pOut = output + j + w*(i + h*(k + c*b));
if (!is_leaky)
pOut[0] = a;
else
pOut[0] = ((a > 0)?a:(slope*a));
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stridex, int stridey, int sizex, int sizey, int padx, int pady, float *input, float *output, int *indexes, int yolotype, int is_leaky, float slope)
{
int h = (in_h + 2*pady - sizey)/stridey + 1;
int w = (in_w + 2*padx - sizex)/stridex + 1;
if (yolotype)
{
h = (in_h + 2*pady)/stridey;
w = (in_w + 2*padx)/stridex;
}
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -padx;
int h_offset = -pady;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < sizey; ++l){
for(m = 0; m < sizex; ++m){
int cur_h = h_offset + i*stridey + l;
int cur_w = w_offset + j*stridex + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
if (is_leaky){ max = (max > 0)? max:(slope*max);}
output[out_index] = max;
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stridex, int stridey, int sizex, int sizey, int padx, int pady, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pady - sizey)/stridey + 1;
int w = (in_w + padx - sizex)/stridex + 1;
int c = in_c;
int areay = (sizey-1)/stridey;
int areax = (sizex-1)/stridex;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -padx;
int h_offset = -pady;
float d = 0;
int l, m;
for(l = -areay; l < areay+1; ++l){
for(m = -areax; m < areax+1; ++m){
int out_w = (j-w_offset)/stridex + m;
int out_h = (i-h_offset)/stridey + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
#define MAXPOOLOPT
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
printf("output h = %d :: w = %d :: c = %d, leaky = %d\n", h,w,c, layer.activation == LEAKY);
size_t n = h*w*c*layer.batch;
// special case, even width and height, 2x2 kernel, and no padding. we can split input into 2x2 tiles
// valid for most initial layers of tiny yolo or yolo.
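// In that case every output pixel reduces a disjoint 2x2 input tile, so the specialized kernel
// reads its four inputs directly without bounds checks; unlike the general kernel it does not
// record argmax indexes.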
#ifdef MAXPOOLOPT
if ((layer.padx == 0)&&(layer.pady==0)&&(layer.h%2==0)&&(layer.w%2==0)&&(layer.sizex == 2)&&(layer.sizey==2)&&(layer.stridex==2)&&(layer.stridey==2))
{
forward_maxpool_2x2_s2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stridex, layer.stridey, layer.sizex, layer.sizey, layer.padx, layer.pady, net.input_gpu, layer.output_gpu, layer.indexes_gpu, layer.yolotype, layer.activation==LEAKY, 0.1);
}
else
#endif
{
forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stridex, layer.stridey, layer.sizex, layer.sizey, layer.padx, layer.pady, net.input_gpu, layer.output_gpu, layer.indexes_gpu, layer.yolotype, layer.activation==LEAKY, 0.1);
}
check_error(cudaPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stridex, layer.stridey, layer.sizex, layer.sizey, layer.padx, layer.pady, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
|
7c7aca2f3d27048b8faceeaa0add05a90949fefc.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
/**check_cuda_error(char *, int )
* Function to identify the CUDA error using the error code.
* file : the file where the error occurred.
* line : the line number where the error occurred.
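* Typical usage (illustrative; call sites are not part of this file):
*   check_cuda_error(__FILE__, __LINE__); // right after a kernel launch or runtime API call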
*/
void check_cuda_error(char *file, int line) {
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(error), file, line);
exit(-1);
}
} | 7c7aca2f3d27048b8faceeaa0add05a90949fefc.cu | #include <cstdio>
/**check_cuda_error(char *, int )
* Function to identify the CUDA error using the error code.
* file : the file where the error occurred.
* line : the line number where the error occurred.
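* Typical usage (illustrative; call sites are not part of this file):
*   check_cuda_error(__FILE__, __LINE__); // right after a kernel launch or runtime API call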
*/
void check_cuda_error(char *file, int line) {
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(error), file, line);
exit(-1);
}
} |
6f8ebf43e09d170884ed46033470f7cbb3001314.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// HEADERS
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
using namespace std;
// DEFINITIONS
#define NX 201
#define NY 201
#define NT 401
#define SENSOR_GROUP_SIZE 10
#ifndef I_MAX
#define I_MAX 20
#endif /* I_MAX */
__constant__ float hx = 0.001f;
__constant__ float hy = 0.001f; // pixel size
__constant__ float h = 0.001f;
/* __constant__ float T = 1.3333e-04f; // 0.2f / 1500.f; */
__constant__ float dt = 3.3333e-07f; // T / 400.f;
/* __constant__ float fre = 125000.f; */
__constant__ float omegac = 7.8540e+05f; // 2.f * pi * fre; // angular frequency (rad/s)
__constant__ float tao = 4.0000e-06f; // pi / omegac;
__constant__ float tt = 8.1573e-06f; // sqrtf(6.f * logf(2.f)) * tao; // time delay
// FUNCTIONS DECLARATION
void Ultrasonic_Tomography();
void IO_Files(float*, float*, float*, float*);
float norm(float*, int);
__global__ void field_setup(const float*, const float*, float*);
__global__ void propagation(int, int, int, int, const float*, float*, int);
__global__ void propagation_at_corners(float*);
__global__ void initial_signal(const float*, float*, float*, float*, float*, int);
__global__ void difference_signal(const float*, const float*, const float*, const float*, const float*, float*, float*, float*, float*, int);
__global__ void backpropagation1(float*, const float*, int);
__global__ void backpropagation2(float*, const float*, const float*, const float*, const float*, int);
__global__ void laplace1(const float*, float*);
__global__ void laplace2(const float*, float*);
__global__ void init_differential(float*, const float*, const float*, const float*);
__global__ void update_differential(float*, const float*, const float*, const float*, int);
/* __global__ void update_field(float*, float*, const float*, float*, const float*); */
__global__ void update_field(float*, const float*, float*, const float*);
__global__ void reset(const float*, float*, float*, float*, float*);
// MAIN PROGRAM
int main(void)
{
// Time measuring variables
int ti = 0, tf = 0;
// Function Execution
printf("Ultrasonic Tomography Running:\n\n");
ti = clock();
printf("ti = %d\n", ti);
Ultrasonic_Tomography();
tf = clock();
printf("tf = %d\n", tf);
printf("tt = %d\n", tf - ti);
printf("Total Seconds = %f\n", (float)(tf - ti) / CLOCKS_PER_SEC);
hipDeviceReset();
// End of the program
/* system("pause"); */
return 0;
}
// FUNCTIONS DEFINITION
void Ultrasonic_Tomography()
{
// Simulation Variables
float hx = 0.001f;
float hy = 0.001f;
int i = 0, j = 0, k = 0;
int Nx_Ny = NX * NY;
int Nx_Ny_Nt = NX * NY * NT;
int Nx_Nt = NX * NT;
float *x = new float[NX];
float *y = new float[NY];
float *fo = new float[Nx_Ny];
float *u = new float[Nx_Ny_Nt];
// Kernel Preparation
/*dim3 Grid_Size(13, 26);
dim3 Block_Size(16, 8);*/
/*dim3 Grid_Size(7, 51);
dim3 Block_Size(32, 4);*/
/*dim3 Grid_Size(7, 26);
dim3 Block_Size(32, 8);*/
dim3 Grid_Size(13, 13);
dim3 Block_Size(16, 16);
// Variables of allocation
float *dev_x;
int size_x = NX * sizeof(float);
float *dev_y;
int size_y = NY * sizeof(float);
float *dev_fo;
int size_fo = Nx_Ny * sizeof(float);
float *dev_u;
int size_u = Nx_Ny_Nt * sizeof(float);
float *dev_g1;
int size_g1 = Nx_Nt * 640 * sizeof(float);
float *dev_g2;
int size_g2 = Nx_Nt * 640 * sizeof(float);
float *dev_g3;
int size_g3 = Nx_Nt * 640 * sizeof(float);
float *dev_g4;
int size_g4 = Nx_Nt * 640 * sizeof(float);
hipMalloc((void**) &dev_x, size_x);
hipMalloc((void**) &dev_y, size_y);
hipMalloc((void**) &dev_fo, size_fo);
hipMalloc((void**) &dev_u, size_u);
hipMalloc((void**) &dev_g1, size_g1);
hipMalloc((void**) &dev_g2, size_g2);
hipMalloc((void**) &dev_g3, size_g3);
hipMalloc((void**) &dev_g4, size_g4);
hipMemset(dev_u, 0.0, size_u);
hipMemset(dev_g1, 0.0, size_g1);
hipMemset(dev_g2, 0.0, size_g2);
hipMemset(dev_g3, 0.0, size_g3);
hipMemset(dev_g4, 0.0, size_g4);
// Environment Initialization
for(i = 0; i < NX; i++)
{
x[i] = -0.1f + i * hx;
}
for(j = 0; j < NY; j++)
{
y[j] = -0.1f + j * hy;
}
hipMemcpy(dev_x, x, size_x, hipMemcpyHostToDevice);
hipMemcpy(dev_y, y, size_y, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( field_setup), dim3(Grid_Size), dim3(Block_Size), 0, 0, dev_x, dev_y, dev_fo);
hipMemcpy(fo, dev_fo, size_fo, hipMemcpyDeviceToHost);
// Position of the transducers
int p = 0;
int *jj = new int[640];
int *ii = new int[640];
for(p = 0; p < 160; p++)
{
ii[p] = 21 + (p + 1);
jj[p] = 181;
}
for(p = 160; p < 320; p++)
{
ii[p] = 181;
jj[p] = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii[p] = 181 - ((p + 1) - 320);
jj[p] = 21;
}
for(p = 480; p < 640; p++)
{
ii[p] = 21;
jj[p] = 21 + ((p + 1) - 480);
}
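// Forward problem: for each group of SENSOR_GROUP_SIZE emitting transducers, propagate the
// wavefield through the true medium fo and record the simulated boundary signals g1..g4.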
for(p = 0; p < 640; p += SENSOR_GROUP_SIZE)
{
hipMemset(dev_u, 0.0, size_u);
int jp1 = jj[p];
int jp2 = jj[p + SENSOR_GROUP_SIZE - 1];
int ip1 = ii[p];
int ip2 = ii[p + SENSOR_GROUP_SIZE - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
for(k = 1; k < NT - 1; k++)
{
hipLaunchKernelGGL(( propagation), dim3(Grid_Size), dim3(Block_Size), 0, 0, jp1, jp2, ip1, ip2, dev_fo, dev_u, k);
}
// Four corners
hipLaunchKernelGGL(( propagation_at_corners), dim3(1), dim3(NT), 0, 0, dev_u);
hipLaunchKernelGGL(( initial_signal), dim3(NT - 2), dim3(159), 0, 0, dev_u, dev_g1, dev_g2, dev_g3, dev_g4, p);
}
// Kaczmarz method
// propagation
/* float *f_t = new float[Nx_Ny * I_MAX]; */
float *dev_rr1;
int size_rr1 = Nx_Nt * sizeof(float);
float *dev_rr2;
int size_rr2 = Nx_Nt * sizeof(float);
float *dev_rr3;
int size_rr3 = Nx_Nt * sizeof(float);
float *dev_rr4;
int size_rr4 = Nx_Nt * sizeof(float);
float *dev_z;
int size_z = Nx_Ny * (NT + 1) * sizeof(float);
float *dev_Lu;
int size_Lu = Nx_Ny_Nt * sizeof(float);
float *dev_f;
int size_f = Nx_Ny * sizeof(float);
float *dev_df;
int size_df = Nx_Ny * sizeof(float);
/* float *dev_alpha; */
/* int size_alpha = Nx_Ny * sizeof(float); */
float *dev_f_minus_fo;
int size_f_minus_fo = Nx_Ny * sizeof(float);
// Allocation
hipMalloc((void**) &dev_rr1, size_rr1);
hipMalloc((void**) &dev_rr2, size_rr2);
hipMalloc((void**) &dev_rr3, size_rr3);
hipMalloc((void**) &dev_rr4, size_rr4);
hipMalloc((void**) &dev_z, size_z);
hipMalloc((void**) &dev_Lu, size_Lu);
hipMalloc((void**) &dev_f, size_f);
hipMalloc((void**) &dev_df, size_df);
/* hipMalloc((void**) &dev_alpha, size_alpha); */
hipMalloc((void**) &dev_f_minus_fo, size_f_minus_fo);
hipMemset(dev_rr1, 0.0, size_rr1);
hipMemset(dev_rr2, 0.0, size_rr2);
hipMemset(dev_rr3, 0.0, size_rr3);
hipMemset(dev_rr4, 0.0, size_rr4);
hipMemset(dev_f, 0.0, size_f);
hipMemset(dev_Lu, 0.0, size_Lu);
float *f = new float[Nx_Ny];
float *f_minus_fo = new float[Nx_Ny];
float epsilon = 0.f;
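// Inversion loop: re-simulate with the current estimate f, backpropagate the residual signals,
// accumulate the differential df, update f, and stop once the relative error against the true
// medium fo drops below 20%.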
for(int iter = 0; iter < I_MAX; iter++)
{
printf("\nIter: %d\n", iter);
hipMemset(dev_u, 0.0, size_u);
for(p = 0; p < 640; p += SENSOR_GROUP_SIZE)
{
int jp1 = jj[p];
int jp2 = jj[p + SENSOR_GROUP_SIZE - 1];
int ip1 = ii[p];
int ip2 = ii[p + SENSOR_GROUP_SIZE - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
for(k = 1; k < NT - 1; k++)
{
hipLaunchKernelGGL(( propagation), dim3(Grid_Size), dim3(Block_Size), 0, 0, jp1, jp2, ip1, ip2, dev_f, dev_u, k);
}
// Four corners
hipLaunchKernelGGL(( propagation_at_corners), dim3(1), dim3(NT), 0, 0, dev_u);
hipLaunchKernelGGL(( difference_signal), dim3(NT - 2), dim3(159), 0, 0, dev_u, dev_g1, dev_g2, dev_g3, dev_g4, dev_rr1, dev_rr2, dev_rr3, dev_rr4, p);
hipMemset(dev_z, 0.0, size_z);
for(k = NT - 2; k > 0; k--)
{
hipLaunchKernelGGL(( backpropagation1), dim3(Grid_Size), dim3(Block_Size), 0, 0, dev_z, dev_f, k);
hipLaunchKernelGGL(( backpropagation2), dim3(1), dim3(NX), 0, 0, dev_z, dev_rr1, dev_rr2, dev_rr3, dev_rr4, k);
}
hipLaunchKernelGGL(( laplace1), dim3(dim3(25, 25, 50)), dim3(dim3(8, 8, 8)), 0, 0, dev_u, dev_Lu);
hipLaunchKernelGGL(( laplace2), dim3(1), dim3(1), 0, 0, dev_u, dev_Lu);
hipLaunchKernelGGL(( init_differential), dim3(Grid_Size), dim3(Block_Size), 0, 0, dev_df, dev_z, dev_Lu, dev_f);
for(k = 2; k < NT; k++)
{
hipLaunchKernelGGL(( update_differential), dim3(Grid_Size), dim3(Block_Size), 0, 0, dev_df, dev_z, dev_Lu, dev_f, k);
}
/* update_field<<<Grid_Size, Block_Size>>>(dev_alpha, dev_f, dev_df, dev_f_minus_fo, dev_fo); */
hipLaunchKernelGGL(( update_field), dim3(Grid_Size), dim3(Block_Size), 0, 0, dev_f, dev_df, dev_f_minus_fo, dev_fo);
}
hipMemcpy(f_minus_fo, dev_f_minus_fo, size_f_minus_fo, hipMemcpyDeviceToHost);
epsilon = norm(f_minus_fo, Nx_Ny) / norm(fo, Nx_Ny) * 100.f;
printf("epsilon = %f\n", epsilon);
if (epsilon < 20.f)
{
break;
}
}
hipMemcpy(f, dev_f, size_f, hipMemcpyDeviceToHost);
IO_Files(x, y, fo, f);
// Free Variables
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_fo);
hipFree(dev_u);
hipFree(dev_g1);
hipFree(dev_g2);
hipFree(dev_g3);
hipFree(dev_g4);
hipFree(dev_rr1);
hipFree(dev_rr2);
hipFree(dev_rr3);
hipFree(dev_rr4);
hipFree(dev_z);
hipFree(dev_Lu);
hipFree(dev_f);
hipFree(dev_df);
/* hipFree(dev_alpha); */
hipFree(dev_f_minus_fo);
///////////////////////////
/* float *image = new float[Nx_Ny]; */
/* hipMemcpy(image, dev_f, size_f, hipMemcpyDeviceToHost); */
/* ofstream file; */
/* for(int yj = 0; yj < NY; yj++) */
/* { */
/* for(int xi = 0; xi < NX; xi++) */
/* { */
/* file << image[xi + NX * yj]; */
/* file << "\t"; */
/* } */
/* file << "\n"; */
/* } */
/* file.close(); */
/////////////////////////
}
__global__ void field_setup(const float *x, const float *y, float *fo)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
float value = 0.f;
/* if(((sqrtf(powf(x[i] - 0.015f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f) || ((sqrtf(powf(x[i] + 0.015f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f)) */
/* { */
/* value = 0.06f; */
/* } */
/* else */
/* { */
/* if(sqrtf(x[i] * x[i] + y[j] * y[j]) <= 0.03f) */
/* { */
/* value = 0.02f; */
/* } */
/* else */
/* { */
/* value = 0.f; */
/* } */
/* } */
float rc = 0.015f;
float rp = 0.005f;
/* float lim = 0.020f; */
float sc = 0.03f;
float sp = 0.05f;
/* float sb = 0.02f; */
if (powf(x[i], 2) + powf(y[j], 2) <= powf(rc, 2))
{
value = sc;
}
if (powf(x[i] - rc * cos(-30 * (3.14159265f / 180)), 2) + powf(y[j] - rc * sin(30 * (3.14159265f / 180)), 2) <= powf(rp, 2))
{
value = sp;
}
if (powf(x[i] + rc * cos(-30 * (3.14159265f / 180)), 2) + powf(y[j] - rc * sin(30 * (3.14159265f / 180)), 2) <= powf(rp, 2))
{
value = sp;
}
if (powf(x[i], 2) + powf(y[j] + rc, 2) <= powf(rp, 2))
{
value = sp;
}
fo[offset] = value;
/*int offset = i + NX * j;
float value = 0.f;
if (((sqrtf(powf(x[i] - 0.05f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f) || ((sqrtf(powf(x[i] + 0.05f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f))
{
value = 0.06f;
}
else
{
if (sqrtf(x[i] * x[i] + y[j] * y[j]) <= 0.03f)
{
value = 0.02f;
}
else
{
if ((x[i] >= -0.05f) && (x[i] <= 0.05f) && (y[j] >= -0.06f) && (y[j] <= -0.045f))
{
value = 0.04f;
}
else
{
if ((x[i] >= -0.03f) && (x[i] <= 0.00f) && (y[j] <= 0.065f) && (y[j] >= (0.04f - 0.5f * x[i])))
{
value = 0.03f;
}
else
{
if ((x[i] >= 0.00f) && (x[i] <= 0.03f) && (y[j] <= 0.065f) && (y[j] >= (0.04f + 0.5f * x[i])))
{
value = 0.03f;
}
else
{
value = 0.f;
}
}
}
}
}
fo[offset] = value;
v[offset] = 1500.f * sqrtf(1.f + value);
r[offset] = v[offset] * dt / hx;
r2[offset] = powf(r[offset], 2.f);
s[offset] = 2.f - 4.f * r2[offset];
*/
}
}
__global__ void propagation(int jp1, int jp2, int ip1, int ip2, const float *f, float *u, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
/* int NX * NY = NX * NY; */
/* int NX * NY * k = NX * NY * k; */
if((i < NX) && (j < NY))
{
/* int i + NX * j = i + NX * j; */
float v = 1500.f * sqrtf(1.f + f[i + NX * j]);
float r = v * dt / hx;
float s = 2.f - 4.f * r * r;
// at sensor
if (((j + 1) >= jp1) && ((j + 1) <= jp2) && ((i + 1) >= ip1) && ((i + 1) <= ip2) && ((k + 1) <= 24))
{
float t = k * dt - tt;
u[i + NX * j + NX * NY * (k + 1)] =
v * v * dt * dt *
cosf(omegac * t) *
expf(-(t * t) / (2.f * tao * tao)) +
r * r *
(u[(i + 1) + NX * j + NX * NY * k] +
u[(i - 1) + NX * j + NX * NY * k] +
u[i + NX * (j - 1) + NX * NY * k] +
u[i + NX * (j + 1) + NX * NY * k]) +
s * u[i + NX * j + NX * NY * k] -
u[i + NX * j + NX * NY * (k - 1)];
}
// not at sensor or boundary
else if (i != 0 && j != 0 && i != (NX - 1) && j != (NY - 1))
{
u[i + NX * j + NX * NY * (k + 1)] =
r * r *
(u[(i + 1) + NX * j + NX * NY * k] +
u[(i - 1) + NX * j + NX * NY * k] +
u[i + NX * (j - 1) + NX * NY * k] +
u[i + NX * (j + 1) + NX * NY * k]) +
s * u[i + NX * j + NX * NY * k] -
u[i + NX * j + NX * NY * (k - 1)];
}
// left boundary
else if ((i == 0) && (j > 0) && (j < (NY - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - r * (r + 2.f)) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + NX * j + 1) + NX * NY * k] -
r * r * u[(i + NX * j + 2) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + NX * j + 1) + NX * NY * (k - 1)];
}
// right boundary
else if ((i == NX - 1) && (j > 0) && (j < (NY - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - 2.f * r - r * r) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + NX * j - 1) + NX * NY * k] -
r * r * u[(i + NX * j - 2) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + NX * j - 1) + NX * NY * (k - 1)];
}
// top boundary
else if ((j == 0) && (i > 0) && (i < (NX - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - 2.f * r - r * r) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + (j + 1) * NX) + NX * NY * k] -
r * r * u[(i + (j + 2) * NX) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + (j + 1) * NX) + NX * NY * (k - 1)];
}
// bottom boundary
else if ((j == NY - 1) && (i > 0) && (i < (NX - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - 2.f * r - r * r) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + (j - 1) * NX) + NX * NY * k] -
r * r * u[(i + (j - 2) * NX) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + (j - 1) * NX) + NX * NY * (k - 1)];
}
}
}
__global__ void propagation_at_corners(float *u)
{
int k = threadIdx.x;
int Nx_Ny = NX * NY;
int Nx_Ny_k = Nx_Ny * k;
u[Nx_Ny_k] =
1.f / 2.f * (u[NX + k] + u[1 + k]);
u[(NX - 1) + Nx_Ny_k] =
1.f / 2.f * (u[(NX - 2) + Nx_Ny_k] + u[(NX - 1) + NX + Nx_Ny_k]);
u[(NY - 1) * NX + Nx_Ny_k] =
1.f / 2.f * (u[(NY - 2) * NX + Nx_Ny_k] + u[1 +(NY - 1) * NX + Nx_Ny_k]);
u[(NX - 1) + (NY - 1) * NX + Nx_Ny_k] =
1.f / 2.f * (u[(NX - 2) + (NY - 1) * NX + Nx_Ny_k] + u[(NX - 1) + (NY - 2) * NX + Nx_Ny_k]);
}
__global__ void initial_signal(const float *u, float *g1, float *g2, float *g3, float *g4, int p)
{
int i = threadIdx.x + 21;
int k = blockIdx.x + 2;
int Nx_Ny_k = NX * NY * k;
int i_k_Nx_Nx_Nt_p = i + NX * k + NX * NT * p;
g1[i_k_Nx_Nx_Nt_p] = u[i + NX * 180 + Nx_Ny_k];
g3[i_k_Nx_Nx_Nt_p] = u[i + NX * 20 + Nx_Ny_k];
g2[i_k_Nx_Nx_Nt_p] = u[180 + NX * i + Nx_Ny_k];
g4[i_k_Nx_Nx_Nt_p] = u[20 + NX * i + Nx_Ny_k];
}
__global__ void difference_signal(const float *u, const float *g1, const float *g2, const float *g3, const float *g4, float *rr1, float *rr2, float *rr3, float *rr4, int p)
{
int i = threadIdx.x + 21;
int k = blockIdx.x + 2;
int Nx_Ny_k = NX * NY * k;
int i_k_Nx_Nx_Nt_p = i + k * NX + NX * NT * p;
int i_Nx_k = i + NX * k;
rr1[i_Nx_k] = g1[i_k_Nx_Nx_Nt_p] - u[i + NX * 180 + Nx_Ny_k];
rr3[i_Nx_k] = g3[i_k_Nx_Nx_Nt_p] - u[i + NX * 20 + Nx_Ny_k];
rr2[i_Nx_k] = g2[i_k_Nx_Nx_Nt_p] - u[180 + NX * i + Nx_Ny_k];
rr4[i_Nx_k] = g4[i_k_Nx_Nx_Nt_p] - u[20 + NX * i + Nx_Ny_k];
}
__global__ void backpropagation1(float *z, const float *f, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= 1) && (i < (NX - 1)) && (j >= 1) && (j < (NY - 1)))
{
z[i + NX * j + NX * NY * k] =
1500.f * 1500.f * (dt * dt) *
((1.f + f[i + NX * (j - 1)]) * z[i + NX * (j - 1) + NX * NY * (k + 1)] +
(1.f + f[i + NX * (j + 1)]) * z[i + NX * (j + 1) + NX * NY * (k + 1)] +
(1.f + f[(i - 1) + NX * j]) * z[(i - 1) + NX * j + NX * NY * (k + 1)] +
(1.f + f[(i + 1) + NX * j]) * z[(i + 1) + NX * j + NX * NY * (k + 1)] -
4.f * (1.f + f[i + NX * j]) *
z[i + NX * j + NX * NY * (k + 1)]) / (h * h) +
2.f * z[i + NX * j + NX * NY * (k + 1)] -
z[i + NX * j + NX * NY * (k + 2)];
}
}
__global__ void backpropagation2(float *z, const float *rr1, const float *rr2, const float *rr3, const float *rr4, int k)
{
int i = threadIdx.x;
if((i >= 21) && (i < 180))
{
z[i + NX * 180 + NX * NY * k] =
z[i + NX * 179 + NX * NY * k] +
rr1[i + NX * k] * h * 1000.f;
z[i + NX * 20 + NX * NY * k] =
z[i + NX * 21 + NX * NY * k] +
rr3[i + NX * k] * h * 1000.f;
z[180 + NX * i + NX * NY * k] =
z[179 + NX * i + NX * NY * k] +
rr2[i + NX * k] * h * 1000.f;
z[20 + NX * i + NX * NY * k] =
z[21 + NX * i + NX * NY * k]
+ rr4[i + NX * k] * h * 1000.f;
}
if((i >= 1) && (i < (NX - 1)))
{
z[i + NX * NY * k] =
z[i + NX + NX * NY * k];
z[i + NX * (NY - 1) + NX * NY * k] =
z[i + NX * (NY - 2) + NX * NY * k];
z[NX * i + NX * NY * k] =
z[1 + NX * i + NX * NY * k];
z[(NX - 1) + NX * i + NX * NY * k] =
z[(NX - 2) + NX * i + NX * NY * k];
}
else if(i == 0)
{
z[NX * NY * k] =
(z[1 + NX * NY * k] +
z[NX + NX * NY * k]) / 2.f;
z[(NX - 1) + NX * NY * k] =
(z[(NX - 2) + NX * NY * k] +
z[(NX - 1) + NX + NX * NY * k]) / 2.f;
z[NX * (NY - 1) + NX * NY * k] =
(z[1 + NX * (NY - 1) + NX * NY * k] +
z[NX * (NY - 2) + NX * NY * k]) / 2.f;
z[(NX - 1) + NX * (NY - 1) + NX * NY * k] =
(z[(NX - 2) + NX * (NY - 1) + NX * NY * k] +
z[(NX - 1) + NX * (NY - 2) + NX * NY * k]) / 2.f;
}
}
__global__ void laplace1(const float *u, float *Lu)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z + 1;
if(i < (NX - 1) && j < (NY - 1) && k < NT)
{
if (i >= 1) {
if (j >= 1) {
Lu[i + NX * j + NX * NY * k] =
(u[i + NX * (j - 1) + NX * NY * k] +
u[i + NX * (j + 1) + NX * NY * k] +
u[(i - 1) + NX * j + NX * NY * k] +
u[(i + 1) + NX * j + NX * NY * k] -
4.f * u[i + NX * j + NX * NY * k]) / (h * h);
}
else {
Lu[i + NX * NY * k] =
(u[i + NX * NY * k] +
u[i + NX + NX * NY * k] +
u[(i - 1) + NX * NY * k] +
u[(i + 1) + NX * NY * k] -
4.f * u[i + NX * NY * k]) / (h * h);
Lu[i + NX * (NY - 1) + NX * NY * k] =
(u[i + NX * (NY - 1) + NX * NY * k] +
u[i + NX * (NY - 2) + NX * NY * k] +
u[(i - 1) + NX * (NY - 1) + NX * NY * k] +
u[(i + 1) + NX * (NY - 1) + NX * NY * k] -
4.f * u[i + NX * (NY - 1) + NX * NY * k]) / (h * h);
Lu[NX * i + NX * NY * k] =
(u[NX * i + NX * NY * k] +
u[1 + NX * i + NX * NY * k] +
u[NX * (i - 1) + NX * NY * k] +
u[NX * (i + 1) + NX * NY * k] -
4.f * u[NX * i + NX * NY * k]) / (h * h);
Lu[(NX - 1) + NX * i + NX * NY * k] =
(u[(NX - 1) + NX * i + NX * NY * k] +
u[(NX - 2) + NX * i + NX * NY * k] +
u[(NX - 1) + NX * (i - 1) + NX * NY * k] +
u[(NX - 1) + NX * (i + 1) + NX * NY * k] -
4.f * u[(NX - 1) + NX * i + NX * NY * k]) / (h * h);
}
}
}
}
__global__ void laplace2(const float *u, float *Lu)
{
# pragma unroll
for (int k = 1; k < NT; ++k) {
Lu[NX * NY * k] =
(Lu[1 + NX * NY * k] +
Lu[NX + NX * NY * k]) / 2.f;
Lu[(NX - 1) + NX * NY * k] =
(Lu[(NX - 2) + NX * NY * k] +
Lu[(NX - 1) + NX + NX * NY * k]) / 2.f;
Lu[NX * (NY - 1) + NX * NY * k] =
(Lu[1 + NX * (NY - 1) + NX * NY * k] +
Lu[NX * (NY - 2) + NX * NY * k]) / 2.f;
Lu[(NX - 1) + NX * (NY - 1) + NX * NY * k] =
(Lu[(NX - 2) + NX * (NY - 1) + NX * NY * k] +
Lu[(NX - 1) + NX * (NY - 2) + NX * NY * k]) / 2.f;
}
}
__global__ void init_differential(float *df, const float *z, const float *Lu, const float *f)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int Nx_Ny = NX * NY;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
df[offset] = z[offset + Nx_Ny] * Lu[offset + Nx_Ny] / (1.f + f[offset]);
}
}
__global__ void update_differential(float *df, const float *z, const float *Lu, const float *f, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY)) {
df[i + NX * j] += z[i + NX * j + NX * NY * k] * Lu[i + NX * j + NX * NY * k] / (1.f + f[i + NX * j]);
}
}
/* __global__ void update_field(float *alpha, float *f, const float *df, float *f_minus_fo, const float *fo) */
__global__ void update_field(float *f, const float *df, float *f_minus_fo, const float *fo)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
bool flag = (i >= 21) && (i < 180) && (j >= 21) && (j < 180);
float alpha = flag ? 1.f : 0.f;
f[offset] += 20000.f * alpha * df[offset];
f_minus_fo[offset] = f[offset] - fo[offset];
}
}
__global__ void reset(const float *f, float *v, float *r, float *r2, float *s)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
v[offset] = 1500.f * sqrtf(1.f + f[offset]);
r[offset] = v[offset] * dt / hx;
r2[offset] = r[offset] * r[offset];
s[offset] = 2.f - 4.f * r2[offset];
}
}
void IO_Files(float *x, float *y, float *fo, float *f)
{
int i = 0, j = 0;
/* int k = 0; */
// I/O Files
ofstream x_file, y_file;
ofstream fo_file;
ofstream f_file;
x_file.open("dev_x.txt");
y_file.open("dev_y.txt");
fo_file.open("dev_f0.txt");
f_file.open("dev_f.txt");
for(i = 0; i < NX; i++)
{
x_file << x[i];
x_file << "\n";
}
for(j = 0; j < NY; j++)
{
y_file << y[j];
y_file << "\n";
}
for(j = 0; j < NY; j++)
{
for(i = 0; i < NX; i++)
{
fo_file << fo[i + NX * j];
fo_file << " ";
}
fo_file << "\n";
}
for(j = 0; j < NY; j++)
{
for(i = 0; i < NX; i++)
{
f_file << f[i + NX * j];
f_file << " ";
}
f_file << "\n";
}
x_file.close();
y_file.close();
fo_file.close();
f_file.close();
}
float norm(float *A, int length)
{
float sum = 0;
for(int i = 0; i < length; i++) {
sum += A[i] * A[i];
}
return sqrtf(sum);
}
| 6f8ebf43e09d170884ed46033470f7cbb3001314.cu | // HEADERS
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
using namespace std;
// DEFINITIONS
#define NX 201
#define NY 201
#define NT 401
#define SENSOR_GROUP_SIZE 10
#ifndef I_MAX
#define I_MAX 20
#endif /* I_MAX */
__constant__ float hx = 0.001f;
__constant__ float hy = 0.001f; // pixel size
__constant__ float h = 0.001f;
/* __constant__ float T = 1.3333e-04f; // 0.2f / 1500.f; */
__constant__ float dt = 3.3333e-07f; // T / 400.f;
/* __constant__ float fre = 125000.f; */
__constant__ float omegac = 7.8540e+05f; // 2.f * pi * fre; // angular frequency (rad/s)
__constant__ float tao = 4.0000e-06f; // pi / omegac;
__constant__ float tt = 8.1573e-06f; // sqrtf(6.f * logf(2.f)) * tao; // time delay
// FUNCTIONS DECLARATION
void Ultrasonic_Tomography();
void IO_Files(float*, float*, float*, float*);
float norm(float*, int);
__global__ void field_setup(const float*, const float*, float*);
__global__ void propagation(int, int, int, int, const float*, float*, int);
__global__ void propagation_at_corners(float*);
__global__ void initial_signal(const float*, float*, float*, float*, float*, int);
__global__ void difference_signal(const float*, const float*, const float*, const float*, const float*, float*, float*, float*, float*, int);
__global__ void backpropagation1(float*, const float*, int);
__global__ void backpropagation2(float*, const float*, const float*, const float*, const float*, int);
__global__ void laplace1(const float*, float*);
__global__ void laplace2(const float*, float*);
__global__ void init_differential(float*, const float*, const float*, const float*);
__global__ void update_differential(float*, const float*, const float*, const float*, int);
/* __global__ void update_field(float*, float*, const float*, float*, const float*); */
__global__ void update_field(float*, const float*, float*, const float*);
__global__ void reset(const float*, float*, float*, float*, float*);
// MAIN PROGRAM
int main(void)
{
// Time measuring variables
int ti = 0, tf = 0;
// Function Execution
printf("Ultrasonic Tomography Running:\n\n");
ti = clock();
printf("ti = %d\n", ti);
Ultrasonic_Tomography();
tf = clock();
printf("tf = %d\n", tf);
printf("tt = %d\n", tf - ti);
printf("Total Seconds = %f\n", (float)(tf - ti) / CLOCKS_PER_SEC);
cudaDeviceReset();
// End of the program
/* system("pause"); */
return 0;
}
// FUNCTIONS DEFINITION
void Ultrasonic_Tomography()
{
// Simulation Variables
float hx = 0.001f;
float hy = 0.001f;
int i = 0, j = 0, k = 0;
int Nx_Ny = NX * NY;
int Nx_Ny_Nt = NX * NY * NT;
int Nx_Nt = NX * NT;
float *x = new float[NX];
float *y = new float[NY];
float *fo = new float[Nx_Ny];
float *u = new float[Nx_Ny_Nt];
// Kernel Preparation
/*dim3 Grid_Size(13, 26);
dim3 Block_Size(16, 8);*/
/*dim3 Grid_Size(7, 51);
dim3 Block_Size(32, 4);*/
/*dim3 Grid_Size(7, 26);
dim3 Block_Size(32, 8);*/
dim3 Grid_Size(13, 13);
dim3 Block_Size(16, 16);
// Variables of allocation
float *dev_x;
int size_x = NX * sizeof(float);
float *dev_y;
int size_y = NY * sizeof(float);
float *dev_fo;
int size_fo = Nx_Ny * sizeof(float);
float *dev_u;
int size_u = Nx_Ny_Nt * sizeof(float);
float *dev_g1;
int size_g1 = Nx_Nt * 640 * sizeof(float);
float *dev_g2;
int size_g2 = Nx_Nt * 640 * sizeof(float);
float *dev_g3;
int size_g3 = Nx_Nt * 640 * sizeof(float);
float *dev_g4;
int size_g4 = Nx_Nt * 640 * sizeof(float);
cudaMalloc((void**) &dev_x, size_x);
cudaMalloc((void**) &dev_y, size_y);
cudaMalloc((void**) &dev_fo, size_fo);
cudaMalloc((void**) &dev_u, size_u);
cudaMalloc((void**) &dev_g1, size_g1);
cudaMalloc((void**) &dev_g2, size_g2);
cudaMalloc((void**) &dev_g3, size_g3);
cudaMalloc((void**) &dev_g4, size_g4);
cudaMemset(dev_u, 0.0, size_u);
cudaMemset(dev_g1, 0.0, size_g1);
cudaMemset(dev_g2, 0.0, size_g2);
cudaMemset(dev_g3, 0.0, size_g3);
cudaMemset(dev_g4, 0.0, size_g4);
// Environment Initialization
for(i = 0; i < NX; i++)
{
x[i] = -0.1f + i * hx;
}
for(j = 0; j < NY; j++)
{
y[j] = -0.1f + j * hy;
}
cudaMemcpy(dev_x, x, size_x, cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, y, size_y, cudaMemcpyHostToDevice);
field_setup<<<Grid_Size, Block_Size>>>(dev_x, dev_y, dev_fo);
cudaMemcpy(fo, dev_fo, size_fo, cudaMemcpyDeviceToHost);
// Position of the transducers
int p = 0;
int *jj = new int[640];
int *ii = new int[640];
for(p = 0; p < 160; p++)
{
ii[p] = 21 + (p + 1);
jj[p] = 181;
}
for(p = 160; p < 320; p++)
{
ii[p] = 181;
jj[p] = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii[p] = 181 - ((p + 1) - 320);
jj[p] = 21;
}
for(p = 480; p < 640; p++)
{
ii[p] = 21;
jj[p] = 21 + ((p + 1) - 480);
}
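// Forward problem: for each group of SENSOR_GROUP_SIZE emitting transducers, propagate the
// wavefield through the true medium fo and record the simulated boundary signals g1..g4.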
for(p = 0; p < 640; p += SENSOR_GROUP_SIZE)
{
cudaMemset(dev_u, 0.0, size_u);
int jp1 = jj[p];
int jp2 = jj[p + SENSOR_GROUP_SIZE - 1];
int ip1 = ii[p];
int ip2 = ii[p + SENSOR_GROUP_SIZE - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
for(k = 1; k < NT - 1; k++)
{
propagation<<<Grid_Size, Block_Size>>>(jp1, jp2, ip1, ip2, dev_fo, dev_u, k);
}
// Four corners
propagation_at_corners<<<1, NT>>>(dev_u);
initial_signal<<<NT - 2, 159>>>(dev_u, dev_g1, dev_g2, dev_g3, dev_g4, p);
}
// Kaczmarz method
// propagation
/* float *f_t = new float[Nx_Ny * I_MAX]; */
float *dev_rr1;
int size_rr1 = Nx_Nt * sizeof(float);
float *dev_rr2;
int size_rr2 = Nx_Nt * sizeof(float);
float *dev_rr3;
int size_rr3 = Nx_Nt * sizeof(float);
float *dev_rr4;
int size_rr4 = Nx_Nt * sizeof(float);
float *dev_z;
int size_z = Nx_Ny * (NT + 1) * sizeof(float);
float *dev_Lu;
int size_Lu = Nx_Ny_Nt * sizeof(float);
float *dev_f;
int size_f = Nx_Ny * sizeof(float);
float *dev_df;
int size_df = Nx_Ny * sizeof(float);
/* float *dev_alpha; */
/* int size_alpha = Nx_Ny * sizeof(float); */
float *dev_f_minus_fo;
int size_f_minus_fo = Nx_Ny * sizeof(float);
// Allocation
cudaMalloc((void**) &dev_rr1, size_rr1);
cudaMalloc((void**) &dev_rr2, size_rr2);
cudaMalloc((void**) &dev_rr3, size_rr3);
cudaMalloc((void**) &dev_rr4, size_rr4);
cudaMalloc((void**) &dev_z, size_z);
cudaMalloc((void**) &dev_Lu, size_Lu);
cudaMalloc((void**) &dev_f, size_f);
cudaMalloc((void**) &dev_df, size_df);
/* cudaMalloc((void**) &dev_alpha, size_alpha); */
cudaMalloc((void**) &dev_f_minus_fo, size_f_minus_fo);
cudaMemset(dev_rr1, 0.0, size_rr1);
cudaMemset(dev_rr2, 0.0, size_rr2);
cudaMemset(dev_rr3, 0.0, size_rr3);
cudaMemset(dev_rr4, 0.0, size_rr4);
cudaMemset(dev_f, 0.0, size_f);
cudaMemset(dev_Lu, 0.0, size_Lu);
float *f = new float[Nx_Ny];
float *f_minus_fo = new float[Nx_Ny];
float epsilon = 0.f;
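// Inversion loop: re-simulate with the current estimate f, backpropagate the residual signals,
// accumulate the differential df, update f, and stop once the relative error against the true
// medium fo drops below 20%.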
for(int iter = 0; iter < I_MAX; iter++)
{
printf("\nIter: %d\n", iter);
cudaMemset(dev_u, 0.0, size_u);
for(p = 0; p < 640; p += SENSOR_GROUP_SIZE)
{
int jp1 = jj[p];
int jp2 = jj[p + SENSOR_GROUP_SIZE - 1];
int ip1 = ii[p];
int ip2 = ii[p + SENSOR_GROUP_SIZE - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
for(k = 1; k < NT - 1; k++)
{
propagation<<<Grid_Size, Block_Size>>>(jp1, jp2, ip1, ip2, dev_f, dev_u, k);
}
// Four corners
propagation_at_corners<<<1, NT>>>(dev_u);
difference_signal<<<NT - 2, 159>>>(dev_u, dev_g1, dev_g2, dev_g3, dev_g4, dev_rr1, dev_rr2, dev_rr3, dev_rr4, p);
cudaMemset(dev_z, 0.0, size_z);
for(k = NT - 2; k > 0; k--)
{
backpropagation1<<<Grid_Size, Block_Size>>>(dev_z, dev_f, k);
backpropagation2<<<1, NX>>>(dev_z, dev_rr1, dev_rr2, dev_rr3, dev_rr4, k);
}
laplace1<<<dim3(25, 25, 50), dim3(8, 8, 8)>>>(dev_u, dev_Lu);
laplace2<<<1, 1>>>(dev_u, dev_Lu);
init_differential<<<Grid_Size, Block_Size>>>(dev_df, dev_z, dev_Lu, dev_f);
for(k = 2; k < NT; k++)
{
update_differential<<<Grid_Size, Block_Size>>>(dev_df, dev_z, dev_Lu, dev_f, k);
}
/* update_field<<<Grid_Size, Block_Size>>>(dev_alpha, dev_f, dev_df, dev_f_minus_fo, dev_fo); */
update_field<<<Grid_Size, Block_Size>>>(dev_f, dev_df, dev_f_minus_fo, dev_fo);
}
cudaMemcpy(f_minus_fo, dev_f_minus_fo, size_f_minus_fo, cudaMemcpyDeviceToHost);
epsilon = norm(f_minus_fo, Nx_Ny) / norm(fo, Nx_Ny) * 100.f;
printf("epsilon = %f\n", epsilon);
if (epsilon < 20.f)
{
break;
}
}
cudaMemcpy(f, dev_f, size_f, cudaMemcpyDeviceToHost);
IO_Files(x, y, fo, f);
// Free Variables
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_fo);
cudaFree(dev_u);
cudaFree(dev_g1);
cudaFree(dev_g2);
cudaFree(dev_g3);
cudaFree(dev_g4);
cudaFree(dev_rr1);
cudaFree(dev_rr2);
cudaFree(dev_rr3);
cudaFree(dev_rr4);
cudaFree(dev_z);
cudaFree(dev_Lu);
cudaFree(dev_f);
cudaFree(dev_df);
/* cudaFree(dev_alpha); */
cudaFree(dev_f_minus_fo);
///////////////////////////
/* float *image = new float[Nx_Ny]; */
/* cudaMemcpy(image, dev_f, size_f, cudaMemcpyDeviceToHost); */
/* ofstream file; */
/* for(int yj = 0; yj < NY; yj++) */
/* { */
/* for(int xi = 0; xi < NX; xi++) */
/* { */
/* file << image[xi + NX * yj]; */
/* file << "\t"; */
/* } */
/* file << "\n"; */
/* } */
/* file.close(); */
/////////////////////////
}
__global__ void field_setup(const float *x, const float *y, float *fo)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
float value = 0.f;
/* if(((sqrtf(powf(x[i] - 0.015f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f) || ((sqrtf(powf(x[i] + 0.015f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f)) */
/* { */
/* value = 0.06f; */
/* } */
/* else */
/* { */
/* if(sqrtf(x[i] * x[i] + y[j] * y[j]) <= 0.03f) */
/* { */
/* value = 0.02f; */
/* } */
/* else */
/* { */
/* value = 0.f; */
/* } */
/* } */
float rc = 0.015f;
float rp = 0.005f;
/* float lim = 0.020f; */
float sc = 0.03f;
float sp = 0.05f;
/* float sb = 0.02f; */
if (powf(x[i], 2) + powf(y[j], 2) <= powf(rc, 2))
{
value = sc;
}
if (powf(x[i] - rc * cos(-30 * (3.14159265f / 180)), 2) + powf(y[j] - rc * sin(30 * (3.14159265f / 180)), 2) <= powf(rp, 2))
{
value = sp;
}
if (powf(x[i] + rc * cos(-30 * (3.14159265f / 180)), 2) + powf(y[j] - rc * sin(30 * (3.14159265f / 180)), 2) <= powf(rp, 2))
{
value = sp;
}
if (powf(x[i], 2) + powf(y[j] + rc, 2) <= powf(rp, 2))
{
value = sp;
}
fo[offset] = value;
/*int offset = i + NX * j;
float value = 0.f;
if (((sqrtf(powf(x[i] - 0.05f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f) || ((sqrtf(powf(x[i] + 0.05f, 2.f) + powf(y[j] + 0.000f, 2.f))) <= 0.005f))
{
value = 0.06f;
}
else
{
if (sqrtf(x[i] * x[i] + y[j] * y[j]) <= 0.03f)
{
value = 0.02f;
}
else
{
if ((x[i] >= -0.05f) && (x[i] <= 0.05f) && (y[j] >= -0.06f) && (y[j] <= -0.045f))
{
value = 0.04f;
}
else
{
if ((x[i] >= -0.03f) && (x[i] <= 0.00f) && (y[j] <= 0.065f) && (y[j] >= (0.04f - 0.5f * x[i])))
{
value = 0.03f;
}
else
{
if ((x[i] >= 0.00f) && (x[i] <= 0.03f) && (y[j] <= 0.065f) && (y[j] >= (0.04f + 0.5f * x[i])))
{
value = 0.03f;
}
else
{
value = 0.f;
}
}
}
}
}
fo[offset] = value;
v[offset] = 1500.f * sqrtf(1.f + value);
r[offset] = v[offset] * dt / hx;
r2[offset] = powf(r[offset], 2.f);
s[offset] = 2.f - 4.f * r2[offset];
*/
}
}
__global__ void propagation(int jp1, int jp2, int ip1, int ip2, const float *f, float *u, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
/* int NX * NY = NX * NY; */
/* int NX * NY * k = NX * NY * k; */
if((i < NX) && (j < NY))
{
/* int i + NX * j = i + NX * j; */
float v = 1500.f * sqrtf(1.f + f[i + NX * j]);
float r = v * dt / hx;
float s = 2.f - 4.f * r * r;
// at sensor
if (((j + 1) >= jp1) && ((j + 1) <= jp2) && ((i + 1) >= ip1) && ((i + 1) <= ip2) && ((k + 1) <= 24))
{
float t = k * dt - tt;
u[i + NX * j + NX * NY * (k + 1)] =
v * v * dt * dt *
cosf(omegac * t) *
expf(-(t * t) / (2.f * tao * tao)) +
r * r *
(u[(i + 1) + NX * j + NX * NY * k] +
u[(i - 1) + NX * j + NX * NY * k] +
u[i + NX * (j - 1) + NX * NY * k] +
u[i + NX * (j + 1) + NX * NY * k]) +
s * u[i + NX * j + NX * NY * k] -
u[i + NX * j + NX * NY * (k - 1)];
}
// not at sensor or boundary
else if (i != 0 && j != 0 && i != (NX - 1) && j != (NY - 1))
{
u[i + NX * j + NX * NY * (k + 1)] =
r * r *
(u[(i + 1) + NX * j + NX * NY * k] +
u[(i - 1) + NX * j + NX * NY * k] +
u[i + NX * (j - 1) + NX * NY * k] +
u[i + NX * (j + 1) + NX * NY * k]) +
s * u[i + NX * j + NX * NY * k] -
u[i + NX * j + NX * NY * (k - 1)];
}
// left boundary
else if ((i == 0) && (j > 0) && (j < (NY - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - r * (r + 2.f)) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + NX * j + 1) + NX * NY * k] -
r * r * u[(i + NX * j + 2) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + NX * j + 1) + NX * NY * (k - 1)];
}
// right boundary
else if ((i == NX - 1) && (j > 0) && (j < (NY - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - 2.f * r - r * r) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + NX * j - 1) + NX * NY * k] -
r * r * u[(i + NX * j - 2) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + NX * j - 1) + NX * NY * (k - 1)];
}
// top boundary
else if ((j == 0) && (i > 0) && (i < (NX - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - 2.f * r - r * r) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + (j + 1) * NX) + NX * NY * k] -
r * r * u[(i + (j + 2) * NX) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + (j + 1) * NX) + NX * NY * (k - 1)];
}
// bottom boundary
else if ((j == NY - 1) && (i > 0) && (i < (NX - 1)))
{
u[i + NX * j + NX * NY * (k + 1)] =
(2.f - 2.f * r - r * r) * u[i + NX * j + NX * NY * k] +
2.f * r * (1.f + r) * u[(i + (j - 1) * NX) + NX * NY * k] -
r * r * u[(i + (j - 2) * NX) + NX * NY * k] +
(2.f * r - 1.f) * u[i + NX * j + NX * NY * (k - 1)] -
2.f * r * u[(i + (j - 1) * NX) + NX * NY * (k - 1)];
}
}
}
__global__ void propagation_at_corners(float *u)
{
int k = threadIdx.x;
int Nx_Ny = NX * NY;
int Nx_Ny_k = Nx_Ny * k;
u[Nx_Ny_k] =
1.f / 2.f * (u[NX + k] + u[1 + k]);
u[(NX - 1) + Nx_Ny_k] =
1.f / 2.f * (u[(NX - 2) + Nx_Ny_k] + u[(NX - 1) + NX + Nx_Ny_k]);
u[(NY - 1) * NX + Nx_Ny_k] =
1.f / 2.f * (u[(NY - 2) * NX + Nx_Ny_k] + u[1 +(NY - 1) * NX + Nx_Ny_k]);
u[(NX - 1) + (NY - 1) * NX + Nx_Ny_k] =
1.f / 2.f * (u[(NX - 2) + (NY - 1) * NX + Nx_Ny_k] + u[(NX - 1) + (NY - 2) * NX + Nx_Ny_k]);
}
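// Record the reference ("observed") signals g1..g4 for source position p along the four
// sensor lines (rows 20 and 180, columns 20 and 180), for receiver index i >= 21 and
// time step k >= 2.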
__global__ void initial_signal(const float *u, float *g1, float *g2, float *g3, float *g4, int p)
{
int i = threadIdx.x + 21;
int k = blockIdx.x + 2;
int Nx_Ny_k = NX * NY * k;
int i_k_Nx_Nx_Nt_p = i + NX * k + NX * NT * p;
g1[i_k_Nx_Nx_Nt_p] = u[i + NX * 180 + Nx_Ny_k];
g3[i_k_Nx_Nx_Nt_p] = u[i + NX * 20 + Nx_Ny_k];
g2[i_k_Nx_Nx_Nt_p] = u[180 + NX * i + Nx_Ny_k];
g4[i_k_Nx_Nx_Nt_p] = u[20 + NX * i + Nx_Ny_k];
}
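// Residuals rr1..rr4 between the stored observed signals g1..g4 and the signals produced
// by the current model; these residuals drive the back-propagated (adjoint) field.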
__global__ void difference_signal(const float *u, const float *g1, const float *g2, const float *g3, const float *g4, float *rr1, float *rr2, float *rr3, float *rr4, int p)
{
int i = threadIdx.x + 21;
int k = blockIdx.x + 2;
int Nx_Ny_k = NX * NY * k;
int i_k_Nx_Nx_Nt_p = i + k * NX + NX * NT * p;
int i_Nx_k = i + NX * k;
rr1[i_Nx_k] = g1[i_k_Nx_Nx_Nt_p] - u[i + NX * 180 + Nx_Ny_k];
rr3[i_Nx_k] = g3[i_k_Nx_Nx_Nt_p] - u[i + NX * 20 + Nx_Ny_k];
rr2[i_Nx_k] = g2[i_k_Nx_Nx_Nt_p] - u[180 + NX * i + Nx_Ny_k];
rr4[i_Nx_k] = g4[i_k_Nx_Nx_Nt_p] - u[20 + NX * i + Nx_Ny_k];
}
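// Time-reversed (adjoint) wave equation: compute the field z at step k from steps k+1
// and k+2 over the interior of the grid, weighted by the current contrast f.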
__global__ void backpropagation1(float *z, const float *f, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= 1) && (i < (NX - 1)) && (j >= 1) && (j < (NY - 1)))
{
z[i + NX * j + NX * NY * k] =
1500.f * 1500.f * (dt * dt) *
((1.f + f[i + NX * (j - 1)]) * z[i + NX * (j - 1) + NX * NY * (k + 1)] +
(1.f + f[i + NX * (j + 1)]) * z[i + NX * (j + 1) + NX * NY * (k + 1)] +
(1.f + f[(i - 1) + NX * j]) * z[(i - 1) + NX * j + NX * NY * (k + 1)] +
(1.f + f[(i + 1) + NX * j]) * z[(i + 1) + NX * j + NX * NY * (k + 1)] -
4.f * (1.f + f[i + NX * j]) *
z[i + NX * j + NX * NY * (k + 1)]) / (h * h) +
2.f * z[i + NX * j + NX * NY * (k + 1)] -
z[i + NX * j + NX * NY * (k + 2)];
}
}
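// Boundary handling for the adjoint field at step k: inject the residuals rr1..rr4 along
// the four sensor lines, copy edge values from the adjacent interior row/column, and
// average the four corners.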
__global__ void backpropagation2(float *z, const float *rr1, const float *rr2, const float *rr3, const float *rr4, int k)
{
int i = threadIdx.x;
if((i >= 21) && (i < 180))
{
z[i + NX * 180 + NX * NY * k] =
z[i + NX * 179 + NX * NY * k] +
rr1[i + NX * k] * h * 1000.f;
z[i + NX * 20 + NX * NY * k] =
z[i + NX * 21 + NX * NY * k] +
rr3[i + NX * k] * h * 1000.f;
z[180 + NX * i + NX * NY * k] =
z[179 + NX * i + NX * NY * k] +
rr2[i + NX * k] * h * 1000.f;
z[20 + NX * i + NX * NY * k] =
z[21 + NX * i + NX * NY * k]
+ rr4[i + NX * k] * h * 1000.f;
}
if((i >= 1) && (i < (NX - 1)))
{
z[i + NX * NY * k] =
z[i + NX + NX * NY * k];
z[i + NX * (NY - 1) + NX * NY * k] =
z[i + NX * (NY - 2) + NX * NY * k];
z[NX * i + NX * NY * k] =
z[1 + NX * i + NX * NY * k];
z[(NX - 1) + NX * i + NX * NY * k] =
z[(NX - 2) + NX * i + NX * NY * k];
}
else if(i == 0)
{
z[NX * NY * k] =
(z[1 + NX * NY * k] +
z[NX + NX * NY * k]) / 2.f;
z[(NX - 1) + NX * NY * k] =
(z[(NX - 2) + NX * NY * k] +
z[(NX - 1) + NX + NX * NY * k]) / 2.f;
z[NX * (NY - 1) + NX * NY * k] =
(z[1 + NX * (NY - 1) + NX * NY * k] +
z[NX * (NY - 2) + NX * NY * k]) / 2.f;
z[(NX - 1) + NX * (NY - 1) + NX * NY * k] =
(z[(NX - 2) + NX * (NY - 1) + NX * NY * k] +
z[(NX - 1) + NX * (NY - 2) + NX * NY * k]) / 2.f;
}
}
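// Five-point Laplacian of the forward field u for interior points; the j == 0 branch also
// fills the non-corner boundary rows and columns, reusing the centre value in place of
// the missing neighbour.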
__global__ void laplace1(const float *u, float *Lu)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z + 1;
if(i < (NX - 1) && j < (NY - 1) && k < NT)
{
if (i >= 1) {
if (j >= 1) {
Lu[i + NX * j + NX * NY * k] =
(u[i + NX * (j - 1) + NX * NY * k] +
u[i + NX * (j + 1) + NX * NY * k] +
u[(i - 1) + NX * j + NX * NY * k] +
u[(i + 1) + NX * j + NX * NY * k] -
4.f * u[i + NX * j + NX * NY * k]) / (h * h);
}
else {
Lu[i + NX * NY * k] =
(u[i + NX * NY * k] +
u[i + NX + NX * NY * k] +
u[(i - 1) + NX * NY * k] +
u[(i + 1) + NX * NY * k] -
4.f * u[i + NX * NY * k]) / (h * h);
Lu[i + NX * (NY - 1) + NX * NY * k] =
(u[i + NX * (NY - 1) + NX * NY * k] +
u[i + NX * (NY - 2) + NX * NY * k] +
u[(i - 1) + NX * (NY - 1) + NX * NY * k] +
u[(i + 1) + NX * (NY - 1) + NX * NY * k] -
4.f * u[i + NX * (NY - 1) + NX * NY * k]) / (h * h);
Lu[NX * i + NX * NY * k] =
(u[NX * i + NX * NY * k] +
u[1 + NX * i + NX * NY * k] +
u[NX * (i - 1) + NX * NY * k] +
u[NX * (i + 1) + NX * NY * k] -
4.f * u[NX * i + NX * NY * k]) / (h * h);
Lu[(NX - 1) + NX * i + NX * NY * k] =
(u[(NX - 1) + NX * i + NX * NY * k] +
u[(NX - 2) + NX * i + NX * NY * k] +
u[(NX - 1) + NX * (i - 1) + NX * NY * k] +
u[(NX - 1) + NX * (i + 1) + NX * NY * k] -
4.f * u[(NX - 1) + NX * i + NX * NY * k]) / (h * h);
}
}
}
}
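// Corner values of the Laplacian are set to the average of the two adjacent boundary
// values; the loop runs over all time planes (the kernel is meant to be launched with a
// single thread).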
__global__ void laplace2(const float *u, float *Lu)
{
# pragma unroll
for (int k = 1; k < NT; ++k) {
Lu[NX * NY * k] =
(Lu[1 + NX * NY * k] +
Lu[NX + NX * NY * k]) / 2.f;
Lu[(NX - 1) + NX * NY * k] =
(Lu[(NX - 2) + NX * NY * k] +
Lu[(NX - 1) + NX + NX * NY * k]) / 2.f;
Lu[NX * (NY - 1) + NX * NY * k] =
(Lu[1 + NX * (NY - 1) + NX * NY * k] +
Lu[NX * (NY - 2) + NX * NY * k]) / 2.f;
Lu[(NX - 1) + NX * (NY - 1) + NX * NY * k] =
(Lu[(NX - 2) + NX * (NY - 1) + NX * NY * k] +
Lu[(NX - 1) + NX * (NY - 2) + NX * NY * k]) / 2.f;
}
}
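// Initialise the gradient image df with the contribution of the first stored time plane:
// df = z * Lu / (1 + f).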
__global__ void init_differential(float *df, const float *z, const float *Lu, const float *f)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int Nx_Ny = NX * NY;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
df[offset] = z[offset + Nx_Ny] * Lu[offset + Nx_Ny] / (1.f + f[offset]);
}
}
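// Accumulate the contribution of time plane k into the gradient image df.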
__global__ void update_differential(float *df, const float *z, const float *Lu, const float *f, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY)) {
df[i + NX * j] += z[i + NX * j + NX * NY * k] * Lu[i + NX * j + NX * NY * k] / (1.f + f[i + NX * j]);
}
}
/* __global__ void update_field(float *alpha, float *f, const float *df, float *f_minus_fo, const float *fo) */
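// Gradient step on the contrast f with a fixed step length (20000), restricted to the
// interior window i, j in [21, 179]; f - fo is stored for monitoring the reconstruction
// error.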
__global__ void update_field(float *f, const float *df, float *f_minus_fo, const float *fo)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
bool flag = (i >= 21) && (i < 180) && (j >= 21) && (j < 180);
float alpha = flag ? 1.f : 0.f;
f[offset] += 20000.f * alpha * df[offset];
f_minus_fo[offset] = f[offset] - fo[offset];
}
}
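// Recompute the derived wave-speed and CFL-related arrays (v, r, r2, s) after f changes.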
__global__ void reset(const float *f, float *v, float *r, float *r2, float *s)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
v[offset] = 1500.f * sqrtf(1.f + f[offset]);
r[offset] = v[offset] * dt / hx;
r2[offset] = r[offset] * r[offset];
s[offset] = 2.f - 4.f * r2[offset];
}
}
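// Write the grid coordinates, the true model fo and the reconstructed model f to text
// files for inspection.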
void IO_Files(float *x, float *y, float *fo, float *f)
{
int i = 0, j = 0;
/* int k = 0; */
// I/O Files
ofstream x_file, y_file;
ofstream fo_file;
ofstream f_file;
x_file.open("dev_x.txt");
y_file.open("dev_y.txt");
fo_file.open("dev_f0.txt");
f_file.open("dev_f.txt");
for(i = 0; i < NX; i++)
{
x_file << x[i];
x_file << "\n";
}
for(j = 0; j < NY; j++)
{
y_file << y[j];
y_file << "\n";
}
for(j = 0; j < NY; j++)
{
for(i = 0; i < NX; i++)
{
fo_file << fo[i + NX * j];
fo_file << " ";
}
fo_file << "\n";
}
for(j = 0; j < NY; j++)
{
for(i = 0; i < NX; i++)
{
f_file << f[i + NX * j];
f_file << " ";
}
f_file << "\n";
}
x_file.close();
y_file.close();
fo_file.close();
f_file.close();
}
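// Euclidean (L2) norm of a host array (e.g. of f - fo).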
float norm(float *A, int length)
{
float sum = 0;
for(int i = 0; i < length; i++) {
sum += A[i] * A[i];
}
return sqrtf(sum);
}
|
91d25e6e49534cde6c3034fedbe234704204c55f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "maxpool_layer.h"
#include "hip/hip_runtime.h"
}
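// One thread per output element: scan the size x size pooling window (offset by -pad/2)
// and store both the maximum value and the flat index of the winning input element.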
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size)/stride + 1;
int w = (in_w + pad - size)/stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad/2;
int h_offset = -pad/2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
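// One thread per input element: visit every output window that could have selected this
// input and, where indexes[] matches, accumulate the corresponding delta into prev_delta.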
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pad - size)/stride + 1;
int w = (in_w + pad - size)/stride + 1;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad/2;
int h_offset = -pad/2;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
size_t epoch = get_current_batch(&net);
size_t n = layer.h*layer.w*layer.c*layer.batch;
if (net.save_delta) save_layer_delta_gpu(&layer, "maxpool_next", epoch);
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
if (net.save_delta) save_layer_delta_gpu(&layer, "maxpool", epoch);
}
| 91d25e6e49534cde6c3034fedbe234704204c55f.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "maxpool_layer.h"
#include "cuda.h"
}
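// One thread per output element: scan the size x size pooling window (offset by -pad/2)
// and store both the maximum value and the flat index of the winning input element.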
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size)/stride + 1;
int w = (in_w + pad - size)/stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad/2;
int h_offset = -pad/2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
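// One thread per input element: visit every output window that could have selected this
// input and, where indexes[] matches, accumulate the corresponding delta into prev_delta.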
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pad - size)/stride + 1;
int w = (in_w + pad - size)/stride + 1;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad/2;
int h_offset = -pad/2;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
size_t epoch = get_current_batch(&net);
size_t n = layer.h*layer.w*layer.c*layer.batch;
if (net.save_delta) save_layer_delta_gpu(&layer, "maxpool_next", epoch);
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
if (net.save_delta) save_layer_delta_gpu(&layer, "maxpool", epoch);
}
|
0f8fd1c7230c399b99db39f047f041d8461f3853.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "run_des.cuh"
#include "Header.cuh"
int bits = 15; // half // 37 took 1 min
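// Each thread tests one candidate value for the unknown low bits of the key: the
// candidate is OR-ed into the known partial key, the message is encrypted with it, and
// on a match the recovered key is written back through *final_key and *done is set.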
__global__ void crack_des(uint64_t* final_key, int limit, uint64_t message, uint64_t encoded, bool* done, int k)
{
if (*done == 1)
return;
uint64_t mid = 1ULL * threadIdx.x + (blockIdx.x) * blockDim.x + k * 1ULL * limit;
uint64_t encrypted_message;
uint64_t now = mid | (*final_key);
run_des(now, message, &encrypted_message);
// compare the new encoded message with the original one
if (encoded == encrypted_message) {
*final_key = now;
*done = 1;
}
}
hipError_t kernel(uint64_t* final_key, uint64_t message, uint64_t encoded)
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
uint64_t* dev_final_key;
bool* dev_done;
dim3 block(4096 * 1, 1, 1); // 40 bit
dim3 thread(512, 1, 1);
uint64_t nom = ((1ULL << (bits)));
uint64_t dom = (1ULL * block.x * thread.x);
int limit = nom / dom + 1;
bool done = 0;
cudaStatus = hipMalloc((void**)&dev_done, sizeof(bool));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMalloc((void**)&dev_final_key, sizeof(uint64_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMemcpy(dev_done, &done, sizeof(bool), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed! ");
}
cudaStatus = hipMemcpy(dev_final_key, final_key, sizeof(uint64_t), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed! ");
}
hipEvent_t start, end; float time;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
printf("\Loops = %d\n", limit);
for (int i = 0; i < limit; ++i) {
hipLaunchKernelGGL(( crack_des) , dim3(block), dim3(thread) , 0, 0, dev_final_key, block.x * thread.x, message, encoded, dev_done, i);
}
hipDeviceSynchronize();
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
printf("DEVICE FINISHED, time = %f ms\n", time);
cudaStatus = hipMemcpy(final_key, dev_final_key, sizeof(uint64_t), hipMemcpyDeviceToHost);
cudaStatus = hipMemcpy(&done, dev_done, sizeof(bool), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed! %d", cudaStatus);
}
hipFree(dev_final_key);
if (done == 1) {
printf("KEY IS FOUND\n");
}
return cudaStatus;
}
// brute force on CPU
void crack_des_host(uint64_t message, uint64_t final_key, uint64_t encoded)
{
clock_t cpu_start, cpu_end;
float cpu_time = 0;
cpu_start = clock();
bool found = 0;
uint64_t want;
uint64_t all = (1ULL << (bits));
for (uint64_t test = 0; test < all; test++) {
uint64_t now = test | final_key;
uint64_t encrypted_message;
h_run_des(now, message, &encrypted_message);
if (encoded == encrypted_message) {
found = 1;
want = now;
}
}
cpu_end = clock();
cpu_time = 1000.0 * (cpu_end - cpu_start) / (1.0 * CLOCKS_PER_SEC);
if (found) {
printf("CPU FOUND IT :\n");
print_in_hex(want);
uint64_t tt, yy;
h_run_des(want, message, &tt);
h_run_des(main_key, message, &yy);
if (tt == yy)
printf("\nHOST THEY ARE EQUAL\n");
}
printf("\nhost ended!, time = %f ms\n", cpu_time);
}
int main()
{
generate_key(&main_key);
//main_key = string_to_int("0E329232EA6D0D73");
//main_key = string_to_int("FFFFFFFFFFFFFFFF");
printf("Keys is: ");
print_in_hex(main_key);
uint64_t message;
uint64_t encrypted_message;
padding(&message, plaintext);
h_run_des(main_key, message, &encrypted_message);
uint64_t final_key = get_partial_key(main_key, 64 - bits);
uint64_t final_key_host = final_key;
hipError_t cudaStatus = kernel(&final_key, message, encrypted_message);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "NOT SUCCEDD!");
}
// <test if the key is correct>
uint64_t encrypted_test;
h_run_des(final_key, message, &encrypted_test);
if (encrypted_test == encrypted_message)
printf("\nTHEY ARE EQUIVALENT\n");
printf("KEY FOUND IS: ");
print_in_hex(final_key);
// </test if the key is correct>
// run on cpu
crack_des_host(message, final_key_host, encrypted_message);
pause_console();
return 0;
}
| 0f8fd1c7230c399b99db39f047f041d8461f3853.cu | #pragma once
#include "run_des.cuh"
#include "Header.cuh"
int bits = 15; // half // 37 took 1 min
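// Each thread tests one candidate value for the unknown low bits of the key: the
// candidate is OR-ed into the known partial key, the message is encrypted with it, and
// on a match the recovered key is written back through *final_key and *done is set.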
__global__ void crack_des(uint64_t* final_key, int limit, uint64_t message, uint64_t encoded, bool* done, int k)
{
if (*done == 1)
return;
uint64_t mid = 1ULL * threadIdx.x + (blockIdx.x) * blockDim.x + k * 1ULL * limit;
uint64_t encrypted_message;
uint64_t now = mid | (*final_key);
run_des(now, message, &encrypted_message);
// compare the new encoded message with the original one
if (encoded == encrypted_message) {
*final_key = now;
*done = 1;
}
}
cudaError_t kernel(uint64_t* final_key, uint64_t message, uint64_t encoded)
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
uint64_t* dev_final_key;
bool* dev_done;
dim3 block(4096 * 1, 1, 1); // 40 bit
dim3 thread(512, 1, 1);
uint64_t nom = ((1ULL << (bits)));
uint64_t dom = (1ULL * block.x * thread.x);
int limit = nom / dom + 1;
bool done = 0;
cudaStatus = cudaMalloc((void**)&dev_done, sizeof(bool));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMalloc((void**)&dev_final_key, sizeof(uint64_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMemcpy(dev_done, &done, sizeof(bool), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed! ");
}
cudaStatus = cudaMemcpy(dev_final_key, final_key, sizeof(uint64_t), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed! ");
}
cudaEvent_t start, end; float time;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
printf("\Loops = %d\n", limit);
for (int i = 0; i < limit; ++i) {
crack_des <<< block, thread >>> (dev_final_key, block.x * thread.x, message, encoded, dev_done, i);
}
cudaDeviceSynchronize();
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
printf("DEVICE FINISHED, time = %f ms\n", time);
cudaStatus = cudaMemcpy(final_key, dev_final_key, sizeof(uint64_t), cudaMemcpyDeviceToHost);
cudaStatus = cudaMemcpy(&done, dev_done, sizeof(bool), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed! %d", cudaStatus);
}
cudaFree(dev_final_key);
if (done == 1) {
printf("KEY IS FOUND\n");
}
return cudaStatus;
}
// brute force on CPU
void crack_des_host(uint64_t message, uint64_t final_key, uint64_t encoded)
{
clock_t cpu_start, cpu_end;
float cpu_time = 0;
cpu_start = clock();
bool found = 0;
uint64_t want;
uint64_t all = (1ULL << (bits));
for (uint64_t test = 0; test < all; test++) {
uint64_t now = test | final_key;
uint64_t encrypted_message;
h_run_des(now, message, &encrypted_message);
if (encoded == encrypted_message) {
found = 1;
want = now;
}
}
cpu_end = clock();
cpu_time = 1000.0 * (cpu_end - cpu_start) / (1.0 * CLOCKS_PER_SEC);
if (found) {
printf("CPU FOUND IT :\n");
print_in_hex(want);
uint64_t tt, yy;
h_run_des(want, message, &tt);
h_run_des(main_key, message, &yy);
if (tt == yy)
printf("\nHOST THEY ARE EQUAL\n");
}
printf("\nhost ended!, time = %f ms\n", cpu_time);
}
int main()
{
generate_key(&main_key);
//main_key = string_to_int("0E329232EA6D0D73");
//main_key = string_to_int("FFFFFFFFFFFFFFFF");
printf("Keys is: ");
print_in_hex(main_key);
uint64_t message;
uint64_t encrypted_message;
padding(&message, plaintext);
h_run_des(main_key, message, &encrypted_message);
uint64_t final_key = get_partial_key(main_key, 64 - bits);
uint64_t final_key_host = final_key;
cudaError_t cudaStatus = kernel(&final_key, message, encrypted_message);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "NOT SUCCEDD!");
}
// <test if the key is correct>
uint64_t encrypted_test;
h_run_des(final_key, message, &encrypted_test);
if (encrypted_test == encrypted_message)
printf("\nTHEY ARE EQUIVALENT\n");
printf("KEY FOUND IS: ");
print_in_hex(final_key);
// </test if the key is correct>
// run on cpu
crack_des_host(message, final_key_host, encrypted_message);
pause_console();
return 0;
}
|
bec8f75b2aebdf5c3dc7532268b21d13a79a3c25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
// Own
#include "hw2_kernels_cu.h"
// C
#include <stdio.h>
// Third party
#include "cs344/reuse/utils.h"
// Inner reuse
//#include "cc/splitters.h"
#define max_cuda( a, b ) ( ((a) > (b)) ? (a) : (b) )
#define min_cuda( a, b ) ( ((a) < (b)) ? (a) : (b) )
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
static void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
// Own
checkCudaErrors(hipFree(d_filter));
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int2 pos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos1d = pos2d.y * numCols + pos2d.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (pos2d.x >= numCols || pos2d.y >= numRows)
return;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
// INFO:
for (int filterRow = -filterWidth/2; filterRow <= filterWidth/2; ++filterRow) {
for (int filterCol = -filterWidth/2; filterCol <= filterWidth/2; ++filterCol) {
// Find the global image position for this filter position
// clamp to boundary of the image
int readedPixelRow =
min_cuda(max_cuda(pos2d.y + filterRow, 0), static_cast<int>(numRows - 1));
int readedPixelCol =
min_cuda(max_cuda(pos2d.x + filterCol, 0), static_cast<int>(numCols - 1));
float pixelValue = static_cast<float>(inputChannel[readedPixelRow * numCols + readedPixelCol]);
float filterValue = filter[(filterRow + filterWidth/2) * filterWidth + filterCol + filterWidth/2];
result += pixelValue * filterValue;
}
}
outputChannel[pos1d] = result;
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
//outputChannel[thread_1D_pos] = result;
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 pos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos1d = pos2d.y * numCols + pos2d.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (pos2d.x >= numCols || pos2d.y >= numRows)
return;
redChannel[pos1d] = inputImageRGBA[pos1d].x;
greenChannel[pos1d] = inputImageRGBA[pos1d].y;
blueChannel[pos1d] = inputImageRGBA[pos1d].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 pos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos1d = pos2d.y * numCols + pos2d.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (pos2d.x >= numCols || pos2d.y >= numRows)
return;
unsigned char red = redChannel[pos1d];
unsigned char green = greenChannel[pos1d];
unsigned char blue = blueChannel[pos1d];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[pos1d] = outputPixel;
}
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc((void**)&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA,
uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA,
const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
//layout2d_t metro = spliGetOpt2DParams(numRows, numCols, 512);
dim3 blockDim(32, 16, 1);
dim3 gridDim( (numCols - 1) / blockDim.x + 1, (numRows - 1) / blockDim.y + 1, 1);
const dim3 blockSize = blockDim;
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize = gridDim;
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0,
d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors()
// immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
cleanup();
}
| bec8f75b2aebdf5c3dc7532268b21d13a79a3c25.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
// Own
#include "hw2_kernels_cu.h"
// C
#include <stdio.h>
// Third party
#include "cs344/reuse/utils.h"
// Inner reuse
//#include "cc/splitters.h"
#define max_cuda( a, b ) ( ((a) > (b)) ? (a) : (b) )
#define min_cuda( a, b ) ( ((a) < (b)) ? (a) : (b) )
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
static void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
// Own
checkCudaErrors(cudaFree(d_filter));
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int2 pos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos1d = pos2d.y * numCols + pos2d.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (pos2d.x >= numCols || pos2d.y >= numRows)
return;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
// INFO: the filter has an odd width
for (int filterRow = -filterWidth/2; filterRow <= filterWidth/2; ++filterRow) {
for (int filterCol = -filterWidth/2; filterCol <= filterWidth/2; ++filterCol) {
// Find the global image position for this filter position
// clamp to boundary of the image
int readedPixelRow =
min_cuda(max_cuda(pos2d.y + filterRow, 0), static_cast<int>(numRows - 1));
int readedPixelCol =
min_cuda(max_cuda(pos2d.x + filterCol, 0), static_cast<int>(numCols - 1));
float pixelValue = static_cast<float>(inputChannel[readedPixelRow * numCols + readedPixelCol]);
float filterValue = filter[(filterRow + filterWidth/2) * filterWidth + filterCol + filterWidth/2];
result += pixelValue * filterValue;
}
}
outputChannel[pos1d] = result;
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
//outputChannel[thread_1D_pos] = result;
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 pos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos1d = pos2d.y * numCols + pos2d.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (pos2d.x >= numCols || pos2d.y >= numRows)
return;
redChannel[pos1d] = inputImageRGBA[pos1d].x;
greenChannel[pos1d] = inputImageRGBA[pos1d].y;
blueChannel[pos1d] = inputImageRGBA[pos1d].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 pos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos1d = pos2d.y * numCols + pos2d.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (pos2d.x >= numCols || pos2d.y >= numRows)
return;
unsigned char red = redChannel[pos1d];
unsigned char green = greenChannel[pos1d];
unsigned char blue = blueChannel[pos1d];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[pos1d] = outputPixel;
}
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc((void**)&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA,
uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA,
const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
//layout2d_t metro = spliGetOpt2DParams(numRows, numCols, 512);
dim3 blockDim(32, 16, 1);
dim3 gridDim( (numCols - 1) / blockDim.x + 1, (numRows - 1) / blockDim.y + 1, 1);
const dim3 blockSize = blockDim;
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize = gridDim;
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(
d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors()
// immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cleanup();
}
|
449641b95662fd41df510ff64d7fa3ff532cb7c4.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2019 Jij Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef USE_ROCM
#include <hip/hip_runtime.h>
#include <system/gpu/chimera_cuda/kernel.hpp>
#include <iostream>
namespace openjij {
namespace system {
//for cuda device (kernel)
namespace chimera_cuda {
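// Checkerboard-style Metropolis sweep over the chimera lattice: the parity of
// (row + column + trotter slice) combined with the intra-unit side (spins 0-3 vs 4-7),
// selected by sw, decides which spins are updated in this call. Couplings and spins are
// staged in shared memory, including a one-cell halo from neighbouring blocks.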
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot,
std::size_t unitsize
>
__global__ void metropolis(
int32_t sw,
int32_t* spin, const FloatType* rand,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
ChimeraInfo info,
double beta, FloatType gamma, double s
){
static_assert(block_row*block_col*block_trot*unitsize <= 1024, "max limit of the number of thread for each block is 1024.");
//switch
//(0 -> 1st chimera unit (t==0, i==0, j==0) -> (0...3))
//(1 -> 1st chimera unit (t==0, i==0, j==0) -> (4...7))
//shared memory
//spin with boundaries
__shared__ int32_t spincache[(block_row+2) * (block_col+2) * (block_trot+2) * unitsize];
__shared__ FloatType randcache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_p_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_n_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_04_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_15_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_26_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_37_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType h_cache[block_row * block_col * block_trot * unitsize];
//__shared__ FloatType dE_cache[block_row * block_col * block_trot * unitsize];
FloatType J_trot = 0;
//know who and where we are
int32_t r = idx_r(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t c = idx_c(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t i = idx_i(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t t = idx_t(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t b_r = r%block_row;
int32_t b_c = c%block_col;
int32_t b_t = t%block_trot;
int32_t global_index = glIdx(info, r, c, i, t);
int32_t local_index = glIdx(info, r, c, i);
int32_t block_index = bkIdx<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
int32_t spin_index = bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
if(info.trotters > 1){
J_trot = 0.5*log(tanh(beta*gamma*(1-s)/(FloatType)info.trotters)); //-(1/2)log(coth(beta*gamma/M))
}
J_out_p_cache[block_index] = J_out_p[local_index];
J_out_n_cache[block_index] = J_out_n[local_index];
J_in_04_cache[block_index] = J_in_04[local_index];
J_in_15_cache[block_index] = J_in_15[local_index];
J_in_26_cache[block_index] = J_in_26[local_index];
J_in_37_cache[block_index] = J_in_37[local_index];
h_cache[block_index] = h[local_index];
randcache[block_index] = rand[global_index];
spincache[spin_index] = spin[global_index];
//be sure that dE_cache is initialized with zero
//dE_cache[block_index] = 0;
//copy boundary spins to shared memory
//row
if(r%block_row == 0 && r != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,-1,b_c,i,b_t)]
= spin[glIdx(info,r-1,c,i,t)];
}
if(r%block_row == block_row-1 && r != info.rows-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,block_row,b_c,i,b_t)]
= spin[glIdx(info,r+1,c,i,t)];
}
//col
if(c%block_col == 0 && c != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,-1,i,b_t)]
= spin[glIdx(info,r,c-1,i,t)];
}
if(c%block_col == block_col-1 && c != info.cols-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,block_col,i,b_t)]
= spin[glIdx(info,r,c+1,i,t)];
}
//trotter slices
if(info.trotters > 1){
if(t%block_trot == 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,-1)]
= spin[glIdx(info,r,c,i,(t!=0)?t-1:info.trotters-1)];
}
if(t%block_trot == block_trot-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,block_trot)]
= spin[glIdx(info,r,c,i,(t!=info.trotters-1)?t+1:0)];
}
}
__syncthreads();
//do metropolis
if(((r+c+t)%2 == sw && i <= 3) || ((r+c+t)%2 != sw && 4 <= i)){
FloatType temp_dE =
-2*s*spincache[spin_index]*beta/(FloatType)(info.trotters)*(
//outside chimera unit
J_out_p_cache[block_index]
//0 to 3 -> go up 4 to 7 -> go left
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r-1:b_r, (4<=i)?b_c-1:b_c,i,b_t)]+
J_out_n_cache[block_index]
//0 to 3 -> go down 4 to 7 -> go right
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r+1:b_r, (4<=i)?b_c+1:b_c,i,b_t)]+
//inside chimera unit
J_in_04_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?4:0,b_t)]+
J_in_15_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?5:1,b_t)]+
J_in_26_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?6:2,b_t)]+
J_in_37_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?7:3,b_t)]+
//local magnetization
h_cache[block_index]);
//trotter slices
if(info.trotters > 1){
temp_dE +=
-2*spincache[spin_index]*J_trot*(
//trotter slices
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t+1)]+
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t-1)]
);
}
//update
spin[global_index] = ((exp(-temp_dE) > randcache[block_index])?-1:1)*spincache[spin_index];
}
__syncthreads();
// reduction for calculating dE
//uint32_t count = block_row * block_col * block_trot * unitsize; // <= 1024
//// thread index
//uint32_t ti = threadIdx.z*(blockDim.y*blockDim.x)+threadIdx.y*(blockDim.x)+threadIdx.x;
//count = count/2; //1024 -> 512
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //512 -> 256
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //256 -> 128
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //128 -> 64
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //64 -> 32
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //32 -> 16
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //16 -> 8
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //8 -> 4
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //4 -> 2
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //2 -> 1
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//if(ti == 0){
// //add 'em
// atomicAdd(&dE[0], dE_cache[ti]);
//}
}
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot>
void metropolis_interface(
int32_t sw,
int32_t* spin, const FloatType* rand,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
const ChimeraInfo& info, const dim3& grid, const dim3& block,
double beta, FloatType gamma, double s){
hipLaunchKernelGGL(( metropolis<FloatType, block_row, block_col, block_trot, info.chimera_unitsize>), dim3(grid), dim3(block), 0, 0,
sw,
spin, rand,
J_out_p,
J_out_n,
J_in_04,
J_in_15,
J_in_26,
J_in_37,
h,
info,
beta, gamma, s
);
}
//template instantiation
#define FLOAT_ARGTYPE int32_t,int32_t*,const float*,const float*,const float*,const float*,const float*,const float*,const float*,const float*,const ChimeraInfo&,const dim3&,const dim3&,double,float,double
#define DOUBLE_ARGTYPE int32_t,int32_t*,const double*,const double*,const double*,const double*,const double*,const double*,const double*,const double*,const ChimeraInfo&,const dim3&,const dim3&,double,double,double
template void metropolis_interface<float,1,1,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,2>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,3>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,4>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,1>(FLOAT_ARGTYPE);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
template void metropolis_interface<double,1,1,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,2>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,3>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,4>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,1>(DOUBLE_ARGTYPE);
#endif
} // namespace chimera_cuda
} // namespace system
} // namespace openjij
#endif
| 449641b95662fd41df510ff64d7fa3ff532cb7c4.cu | // Copyright 2019 Jij Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <system/gpu/chimera_cuda/kernel.hpp>
#include <iostream>
namespace openjij {
namespace system {
//for cuda device (kernel)
namespace chimera_cuda {
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot,
std::size_t unitsize
>
__global__ void metropolis(
int32_t sw,
int32_t* spin, const FloatType* rand,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
ChimeraInfo info,
double beta, FloatType gamma, double s
){
static_assert(block_row*block_col*block_trot*unitsize <= 1024, "max limit of the number of thread for each block is 1024.");
//switch
//(0 -> 1st chimera unit (t==0, i==0, j==0) -> (0...3))
//(1 -> 1st chimera unit (t==0, i==0, j==0) -> (4...7))
//shared memory
//spin with boundaries
__shared__ int32_t spincache[(block_row+2) * (block_col+2) * (block_trot+2) * unitsize];
__shared__ FloatType randcache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_p_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_n_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_04_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_15_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_26_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_37_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType h_cache[block_row * block_col * block_trot * unitsize];
//__shared__ FloatType dE_cache[block_row * block_col * block_trot * unitsize];
FloatType J_trot = 0;
//know who and where we are
int32_t r = idx_r(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t c = idx_c(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t i = idx_i(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t t = idx_t(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t b_r = r%block_row;
int32_t b_c = c%block_col;
int32_t b_t = t%block_trot;
int32_t global_index = glIdx(info, r, c, i, t);
int32_t local_index = glIdx(info, r, c, i);
int32_t block_index = bkIdx<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
int32_t spin_index = bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
if(info.trotters > 1){
J_trot = 0.5*log(tanh(beta*gamma*(1-s)/(FloatType)info.trotters)); //-(1/2)log(coth(beta*gamma/M))
}
J_out_p_cache[block_index] = J_out_p[local_index];
J_out_n_cache[block_index] = J_out_n[local_index];
J_in_04_cache[block_index] = J_in_04[local_index];
J_in_15_cache[block_index] = J_in_15[local_index];
J_in_26_cache[block_index] = J_in_26[local_index];
J_in_37_cache[block_index] = J_in_37[local_index];
h_cache[block_index] = h[local_index];
randcache[block_index] = rand[global_index];
spincache[spin_index] = spin[global_index];
//be sure that dE_cache is initialized with zero
//dE_cache[block_index] = 0;
//copy boundary spins to shared memory
//row
if(r%block_row == 0 && r != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,-1,b_c,i,b_t)]
= spin[glIdx(info,r-1,c,i,t)];
}
if(r%block_row == block_row-1 && r != info.rows-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,block_row,b_c,i,b_t)]
= spin[glIdx(info,r+1,c,i,t)];
}
//col
if(c%block_col == 0 && c != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,-1,i,b_t)]
= spin[glIdx(info,r,c-1,i,t)];
}
if(c%block_col == block_col-1 && c != info.cols-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,block_col,i,b_t)]
= spin[glIdx(info,r,c+1,i,t)];
}
//trotter slices
if(info.trotters > 1){
if(t%block_trot == 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,-1)]
= spin[glIdx(info,r,c,i,(t!=0)?t-1:info.trotters-1)];
}
if(t%block_trot == block_trot-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,block_trot)]
= spin[glIdx(info,r,c,i,(t!=info.trotters-1)?t+1:0)];
}
}
__syncthreads();
//do metropolis
if(((r+c+t)%2 == sw && i <= 3) || ((r+c+t)%2 != sw && 4 <= i)){
FloatType temp_dE =
-2*s*spincache[spin_index]*beta/(FloatType)(info.trotters)*(
//outside chimera unit
J_out_p_cache[block_index]
//0 to 3 -> go up 4 to 7 -> go left
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r-1:b_r, (4<=i)?b_c-1:b_c,i,b_t)]+
J_out_n_cache[block_index]
//0 to 3 -> go down 4 to 7 -> go right
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r+1:b_r, (4<=i)?b_c+1:b_c,i,b_t)]+
//inside chimera unit
J_in_04_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?4:0,b_t)]+
J_in_15_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?5:1,b_t)]+
J_in_26_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?6:2,b_t)]+
J_in_37_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?7:3,b_t)]+
//local magnetization
h_cache[block_index]);
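//temp_dE: intra-slice term, -2*s*(beta/trotters)*spin*(neighbour couplings + local field h)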
//trotter slices
if(info.trotters > 1){
temp_dE +=
-2*spincache[spin_index]*J_trot*(
//trotter slices
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t+1)]+
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t-1)]
);
}
//update
spin[global_index] = ((exp(-temp_dE) > randcache[block_index])?-1:1)*spincache[spin_index];
}
__syncthreads();
// reduction for calculating dE
//uint32_t count = block_row * block_col * block_trot * unitsize; // <= 1024
//// thread index
//uint32_t ti = threadIdx.z*(blockDim.y*blockDim.x)+threadIdx.y*(blockDim.x)+threadIdx.x;
//count = count/2; //1024 -> 512
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //512 -> 256
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //256 -> 128
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //128 -> 64
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //64 -> 32
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //32 -> 16
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //16 -> 8
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //8 -> 4
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //4 -> 2
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//count = count/2; //2 -> 1
//if(ti < count){
// dE_cache[ti] += dE_cache[ti+count];
//}
//__syncthreads();
//if(ti == 0){
// //add 'em
// atomicAdd(&dE[0], dE_cache[ti]);
//}
}
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot>
void metropolis_interface(
int32_t sw,
int32_t* spin, const FloatType* rand,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
const ChimeraInfo& info, const dim3& grid, const dim3& block,
double beta, FloatType gamma, double s){
metropolis<FloatType, block_row, block_col, block_trot, info.chimera_unitsize><<<grid, block>>>(
sw,
spin, rand,
J_out_p,
J_out_n,
J_in_04,
J_in_15,
J_in_26,
J_in_37,
h,
info,
beta, gamma, s
);
}
//template instantiation
#define FLOAT_ARGTYPE int32_t,int32_t*,const float*,const float*,const float*,const float*,const float*,const float*,const float*,const float*,const ChimeraInfo&,const dim3&,const dim3&,double,float,double
#define DOUBLE_ARGTYPE int32_t,int32_t*,const double*,const double*,const double*,const double*,const double*,const double*,const double*,const double*,const ChimeraInfo&,const dim3&,const dim3&,double,double,double
template void metropolis_interface<float,1,1,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,2>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,3>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,4>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,1>(FLOAT_ARGTYPE);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
template void metropolis_interface<double,1,1,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,2>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,3>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,4>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,1>(DOUBLE_ARGTYPE);
#endif
} // namespace chimera_cuda
} // namespace system
} // namespace openjij
#endif
|
5622fd699a16856b2829e987892cd5a1fc7e78cf.hip | // !!! This is a file automatically generated by hipify!!!
//
// auto-generated by ops.py//
//header
#define OPS_ACC_MD_MACROS
#define OPS_3D
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#include <hip/hip_complex.h>
#define OPS_FUN_PREFIX __device__ __host__
#include "user_types.h"
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
// global constants
__constant__ double g_small;
__constant__ double g_big;
__constant__ double dtc_safe;
__constant__ double dtu_safe;
__constant__ double dtv_safe;
__constant__ double dtw_safe;
__constant__ double dtdiv_safe;
__constant__ field_type field;
__constant__ grid_type grid;
__constant__ state_type *states;
__constant__ int number_of_states;
__constant__ int g_sphe;
__constant__ int g_point;
__constant__ int g_cube;
__constant__ double dt;
void ops_init_backend() {}
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!strcmp(name,"g_small")) {
cutilSafeCall(hipMemcpyToSymbol(g_small, dat, dim*size));
}
else
if (!strcmp(name,"g_big")) {
cutilSafeCall(hipMemcpyToSymbol(g_big, dat, dim*size));
}
else
if (!strcmp(name,"dtc_safe")) {
cutilSafeCall(hipMemcpyToSymbol(dtc_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtu_safe")) {
cutilSafeCall(hipMemcpyToSymbol(dtu_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtv_safe")) {
cutilSafeCall(hipMemcpyToSymbol(dtv_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtw_safe")) {
cutilSafeCall(hipMemcpyToSymbol(dtw_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtdiv_safe")) {
cutilSafeCall(hipMemcpyToSymbol(dtdiv_safe, dat, dim*size));
}
else
if (!strcmp(name,"field")) {
cutilSafeCall(hipMemcpyToSymbol(field, dat, dim*size));
}
else
if (!strcmp(name,"grid")) {
cutilSafeCall(hipMemcpyToSymbol(grid, dat, dim*size));
}
else
if (!strcmp(name,"states")) {
char *temp; cutilSafeCall(hipMalloc((void**)&temp,dim*size));
cutilSafeCall(hipMemcpy(temp,dat,dim*size,hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpyToSymbol(states, &temp, sizeof(char *)));
}
else
if (!strcmp(name,"number_of_states")) {
cutilSafeCall(hipMemcpyToSymbol(number_of_states, dat, dim*size));
}
else
if (!strcmp(name,"g_sphe")) {
cutilSafeCall(hipMemcpyToSymbol(g_sphe, dat, dim*size));
}
else
if (!strcmp(name,"g_point")) {
cutilSafeCall(hipMemcpyToSymbol(g_point, dat, dim*size));
}
else
if (!strcmp(name,"g_cube")) {
cutilSafeCall(hipMemcpyToSymbol(g_cube, dat, dim*size));
}
else
if (!strcmp(name,"dt")) {
cutilSafeCall(hipMemcpyToSymbol(dt, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "initialise_chunk_kernel_xx_cuda_kernel.cu"
#include "initialise_chunk_kernel_yy_cuda_kernel.cu"
#include "initialise_chunk_kernel_zz_cuda_kernel.cu"
#include "initialise_chunk_kernel_x_cuda_kernel.cu"
#include "initialise_chunk_kernel_y_cuda_kernel.cu"
#include "initialise_chunk_kernel_z_cuda_kernel.cu"
#include "initialise_chunk_kernel_cellx_cuda_kernel.cu"
#include "initialise_chunk_kernel_celly_cuda_kernel.cu"
#include "initialise_chunk_kernel_cellz_cuda_kernel.cu"
#include "initialise_chunk_kernel_volume_cuda_kernel.cu"
#include "generate_chunk_kernel_cuda_kernel.cu"
#include "ideal_gas_kernel_cuda_kernel.cu"
#include "update_halo_kernel1_b2_cuda_kernel.cu"
#include "update_halo_kernel1_b1_cuda_kernel.cu"
#include "update_halo_kernel1_t2_cuda_kernel.cu"
#include "update_halo_kernel1_t1_cuda_kernel.cu"
#include "update_halo_kernel1_l2_cuda_kernel.cu"
#include "update_halo_kernel1_l1_hip_kernel.hip"
#include "update_halo_kernel1_r2_cuda_kernel.cu"
#include "update_halo_kernel1_r1_cuda_kernel.cu"
#include "update_halo_kernel1_ba2_hip_kernel.hip"
#include "update_halo_kernel1_ba1_cuda_kernel.cu"
#include "update_halo_kernel1_fr2_hip_kernel.hip"
#include "update_halo_kernel1_fr1_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_4_bot_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_bot_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_4_top_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_top_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_minus_4_left_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_minus_2_left_hip_kernel.hip"
#include "update_halo_kernel2_xvel_minus_4_right_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_minus_2_right_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_4_back_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_back_hip_kernel.hip"
#include "update_halo_kernel2_xvel_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_4_bot_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_2_bot_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_4_top_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_2_top_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_left_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_2_left_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_right_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_2_right_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_back_hip_kernel.hip"
#include "update_halo_kernel2_yvel_plus_2_back_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_bot_hip_kernel.hip"
#include "update_halo_kernel2_zvel_plus_2_bot_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_top_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_2_top_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_left_hip_kernel.hip"
#include "update_halo_kernel2_zvel_plus_2_left_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_right_hip_kernel.hip"
#include "update_halo_kernel2_zvel_plus_2_right_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_4_back_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_2_back_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_4_front_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_2_front_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_a_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_a_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_b_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_b_cuda_kernel.cu"
#include "update_halo_kernel3_minus_4_a_cuda_kernel.cu"
#include "update_halo_kernel3_minus_2_a_cuda_kernel.cu"
#include "update_halo_kernel3_minus_4_b_cuda_kernel.cu"
#include "update_halo_kernel3_minus_2_b_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_back_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_back_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel4_minus_4_a_cuda_kernel.cu"
#include "update_halo_kernel4_minus_2_a_cuda_kernel.cu"
#include "update_halo_kernel4_minus_4_b_cuda_kernel.cu"
#include "update_halo_kernel4_minus_2_b_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_a_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_a_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_b_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_b_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_back_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_back_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel5_plus_4_a_cuda_kernel.cu"
#include "update_halo_kernel5_plus_2_a_cuda_kernel.cu"
#include "update_halo_kernel5_plus_4_b_hip_kernel.hip"
#include "update_halo_kernel5_plus_2_b_cuda_kernel.cu"
#include "update_halo_kernel5_plus_4_left_hip_kernel.hip"
#include "update_halo_kernel5_plus_2_left_hip_kernel.hip"
#include "update_halo_kernel5_plus_4_right_cuda_kernel.cu"
#include "update_halo_kernel5_plus_2_right_cuda_kernel.cu"
#include "update_halo_kernel5_minus_4_back_hip_kernel.hip"
#include "update_halo_kernel5_minus_2_back_cuda_kernel.cu"
#include "update_halo_kernel5_minus_4_front_cuda_kernel.cu"
#include "update_halo_kernel5_minus_2_front_cuda_kernel.cu"
#include "field_summary_kernel_cuda_kernel.cu"
#include "viscosity_kernel_cuda_kernel.cu"
#include "calc_dt_kernel_cuda_kernel.cu"
#include "calc_dt_kernel_min_cuda_kernel.cu"
#include "calc_dt_kernel_get_cuda_kernel.cu"
#include "calc_dt_kernel_print_cuda_kernel.cu"
#include "PdV_kernel_predict_cuda_kernel.cu"
#include "PdV_kernel_nopredict_cuda_kernel.cu"
#include "revert_kernel_cuda_kernel.cu"
#include "accelerate_kernel_cuda_kernel.cu"
#include "flux_calc_kernelx_cuda_kernel.cu"
#include "flux_calc_kernely_cuda_kernel.cu"
#include "flux_calc_kernelz_cuda_kernel.cu"
#include "advec_cell_kernel1_xdir_hip_kernel.hip"
#include "advec_cell_kernel2_xdir_hip_kernel.hip"
#include "advec_cell_kernel3_xdir_cuda_kernel.cu"
#include "advec_cell_kernel4_xdir_cuda_kernel.cu"
#include "advec_cell_kernel1_ydir_cuda_kernel.cu"
#include "advec_cell_kernel2_ydir_hip_kernel.hip"
#include "advec_cell_kernel3_ydir_cuda_kernel.cu"
#include "advec_cell_kernel4_ydir_cuda_kernel.cu"
#include "advec_cell_kernel1_zdir_cuda_kernel.cu"
#include "advec_cell_kernel2_zdir_cuda_kernel.cu"
#include "advec_cell_kernel3_zdir_cuda_kernel.cu"
#include "advec_cell_kernel4_zdir_cuda_kernel.cu"
#include "advec_mom_kernel_x1_cuda_kernel.cu"
#include "advec_mom_kernel_z1_cuda_kernel.cu"
#include "advec_mom_kernel_x2_cuda_kernel.cu"
#include "advec_mom_kernel_y2_cuda_kernel.cu"
#include "advec_mom_kernel_x3_cuda_kernel.cu"
#include "advec_mom_kernel_z3_hip_kernel.hip"
#include "advec_mom_kernel_mass_flux_x_cuda_kernel.cu"
#include "advec_mom_kernel_post_pre_advec_x_cuda_kernel.cu"
#include "advec_mom_kernel1_x_nonvector_cuda_kernel.cu"
#include "advec_mom_kernel2_x_hip_kernel.hip"
#include "advec_mom_kernel_mass_flux_y_cuda_kernel.cu"
#include "advec_mom_kernel_post_pre_advec_y_cuda_kernel.cu"
#include "advec_mom_kernel1_y_nonvector_cuda_kernel.cu"
#include "advec_mom_kernel2_y_cuda_kernel.cu"
#include "advec_mom_kernel_mass_flux_z_cuda_kernel.cu"
#include "advec_mom_kernel_post_pre_advec_z_cuda_kernel.cu"
#include "advec_mom_kernel1_z_nonvector_cuda_kernel.cu"
#include "advec_mom_kernel2_z_cuda_kernel.cu"
#include "reset_field_kernel1_cuda_kernel.cu"
#include "reset_field_kernel2_cuda_kernel.cu"
| 5622fd699a16856b2829e987892cd5a1fc7e78cf.cu | //
// auto-generated by ops.py//
//header
#define OPS_ACC_MD_MACROS
#define OPS_3D
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#include <cuComplex.h>
#define OPS_FUN_PREFIX __device__ __host__
#include "user_types.h"
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
// global constants
__constant__ double g_small;
__constant__ double g_big;
__constant__ double dtc_safe;
__constant__ double dtu_safe;
__constant__ double dtv_safe;
__constant__ double dtw_safe;
__constant__ double dtdiv_safe;
__constant__ field_type field;
__constant__ grid_type grid;
__constant__ state_type *states;
__constant__ int number_of_states;
__constant__ int g_sphe;
__constant__ int g_point;
__constant__ int g_cube;
__constant__ double dt;
void ops_init_backend() {}
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!strcmp(name,"g_small")) {
cutilSafeCall(cudaMemcpyToSymbol(g_small, dat, dim*size));
}
else
if (!strcmp(name,"g_big")) {
cutilSafeCall(cudaMemcpyToSymbol(g_big, dat, dim*size));
}
else
if (!strcmp(name,"dtc_safe")) {
cutilSafeCall(cudaMemcpyToSymbol(dtc_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtu_safe")) {
cutilSafeCall(cudaMemcpyToSymbol(dtu_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtv_safe")) {
cutilSafeCall(cudaMemcpyToSymbol(dtv_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtw_safe")) {
cutilSafeCall(cudaMemcpyToSymbol(dtw_safe, dat, dim*size));
}
else
if (!strcmp(name,"dtdiv_safe")) {
cutilSafeCall(cudaMemcpyToSymbol(dtdiv_safe, dat, dim*size));
}
else
if (!strcmp(name,"field")) {
cutilSafeCall(cudaMemcpyToSymbol(field, dat, dim*size));
}
else
if (!strcmp(name,"grid")) {
cutilSafeCall(cudaMemcpyToSymbol(grid, dat, dim*size));
}
else
if (!strcmp(name,"states")) {
char *temp; cutilSafeCall(cudaMalloc((void**)&temp,dim*size));
cutilSafeCall(cudaMemcpy(temp,dat,dim*size,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol(states, &temp, sizeof(char *)));
}
else
if (!strcmp(name,"number_of_states")) {
cutilSafeCall(cudaMemcpyToSymbol(number_of_states, dat, dim*size));
}
else
if (!strcmp(name,"g_sphe")) {
cutilSafeCall(cudaMemcpyToSymbol(g_sphe, dat, dim*size));
}
else
if (!strcmp(name,"g_point")) {
cutilSafeCall(cudaMemcpyToSymbol(g_point, dat, dim*size));
}
else
if (!strcmp(name,"g_cube")) {
cutilSafeCall(cudaMemcpyToSymbol(g_cube, dat, dim*size));
}
else
if (!strcmp(name,"dt")) {
cutilSafeCall(cudaMemcpyToSymbol(dt, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "initialise_chunk_kernel_xx_cuda_kernel.cu"
#include "initialise_chunk_kernel_yy_cuda_kernel.cu"
#include "initialise_chunk_kernel_zz_cuda_kernel.cu"
#include "initialise_chunk_kernel_x_cuda_kernel.cu"
#include "initialise_chunk_kernel_y_cuda_kernel.cu"
#include "initialise_chunk_kernel_z_cuda_kernel.cu"
#include "initialise_chunk_kernel_cellx_cuda_kernel.cu"
#include "initialise_chunk_kernel_celly_cuda_kernel.cu"
#include "initialise_chunk_kernel_cellz_cuda_kernel.cu"
#include "initialise_chunk_kernel_volume_cuda_kernel.cu"
#include "generate_chunk_kernel_cuda_kernel.cu"
#include "ideal_gas_kernel_cuda_kernel.cu"
#include "update_halo_kernel1_b2_cuda_kernel.cu"
#include "update_halo_kernel1_b1_cuda_kernel.cu"
#include "update_halo_kernel1_t2_cuda_kernel.cu"
#include "update_halo_kernel1_t1_cuda_kernel.cu"
#include "update_halo_kernel1_l2_cuda_kernel.cu"
#include "update_halo_kernel1_l1_cuda_kernel.cu"
#include "update_halo_kernel1_r2_cuda_kernel.cu"
#include "update_halo_kernel1_r1_cuda_kernel.cu"
#include "update_halo_kernel1_ba2_cuda_kernel.cu"
#include "update_halo_kernel1_ba1_cuda_kernel.cu"
#include "update_halo_kernel1_fr2_cuda_kernel.cu"
#include "update_halo_kernel1_fr1_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_4_bot_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_bot_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_4_top_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_top_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_minus_4_left_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_minus_2_left_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_minus_4_right_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_minus_2_right_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_4_back_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_back_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel2_xvel_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_4_bot_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_2_bot_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_4_top_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_minus_2_top_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_left_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_2_left_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_right_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_2_right_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_back_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_2_back_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel2_yvel_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_bot_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_2_bot_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_top_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_2_top_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_left_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_2_left_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_4_right_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_plus_2_right_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_4_back_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_2_back_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_4_front_cuda_kernel.cu"
#include "update_halo_kernel2_zvel_minus_2_front_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_a_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_a_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_b_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_b_cuda_kernel.cu"
#include "update_halo_kernel3_minus_4_a_cuda_kernel.cu"
#include "update_halo_kernel3_minus_2_a_cuda_kernel.cu"
#include "update_halo_kernel3_minus_4_b_cuda_kernel.cu"
#include "update_halo_kernel3_minus_2_b_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_back_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_back_cuda_kernel.cu"
#include "update_halo_kernel3_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel3_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel4_minus_4_a_cuda_kernel.cu"
#include "update_halo_kernel4_minus_2_a_cuda_kernel.cu"
#include "update_halo_kernel4_minus_4_b_cuda_kernel.cu"
#include "update_halo_kernel4_minus_2_b_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_a_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_a_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_b_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_b_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_back_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_back_cuda_kernel.cu"
#include "update_halo_kernel4_plus_4_front_cuda_kernel.cu"
#include "update_halo_kernel4_plus_2_front_cuda_kernel.cu"
#include "update_halo_kernel5_plus_4_a_cuda_kernel.cu"
#include "update_halo_kernel5_plus_2_a_cuda_kernel.cu"
#include "update_halo_kernel5_plus_4_b_cuda_kernel.cu"
#include "update_halo_kernel5_plus_2_b_cuda_kernel.cu"
#include "update_halo_kernel5_plus_4_left_cuda_kernel.cu"
#include "update_halo_kernel5_plus_2_left_cuda_kernel.cu"
#include "update_halo_kernel5_plus_4_right_cuda_kernel.cu"
#include "update_halo_kernel5_plus_2_right_cuda_kernel.cu"
#include "update_halo_kernel5_minus_4_back_cuda_kernel.cu"
#include "update_halo_kernel5_minus_2_back_cuda_kernel.cu"
#include "update_halo_kernel5_minus_4_front_cuda_kernel.cu"
#include "update_halo_kernel5_minus_2_front_cuda_kernel.cu"
#include "field_summary_kernel_cuda_kernel.cu"
#include "viscosity_kernel_cuda_kernel.cu"
#include "calc_dt_kernel_cuda_kernel.cu"
#include "calc_dt_kernel_min_cuda_kernel.cu"
#include "calc_dt_kernel_get_cuda_kernel.cu"
#include "calc_dt_kernel_print_cuda_kernel.cu"
#include "PdV_kernel_predict_cuda_kernel.cu"
#include "PdV_kernel_nopredict_cuda_kernel.cu"
#include "revert_kernel_cuda_kernel.cu"
#include "accelerate_kernel_cuda_kernel.cu"
#include "flux_calc_kernelx_cuda_kernel.cu"
#include "flux_calc_kernely_cuda_kernel.cu"
#include "flux_calc_kernelz_cuda_kernel.cu"
#include "advec_cell_kernel1_xdir_cuda_kernel.cu"
#include "advec_cell_kernel2_xdir_cuda_kernel.cu"
#include "advec_cell_kernel3_xdir_cuda_kernel.cu"
#include "advec_cell_kernel4_xdir_cuda_kernel.cu"
#include "advec_cell_kernel1_ydir_cuda_kernel.cu"
#include "advec_cell_kernel2_ydir_cuda_kernel.cu"
#include "advec_cell_kernel3_ydir_cuda_kernel.cu"
#include "advec_cell_kernel4_ydir_cuda_kernel.cu"
#include "advec_cell_kernel1_zdir_cuda_kernel.cu"
#include "advec_cell_kernel2_zdir_cuda_kernel.cu"
#include "advec_cell_kernel3_zdir_cuda_kernel.cu"
#include "advec_cell_kernel4_zdir_cuda_kernel.cu"
#include "advec_mom_kernel_x1_cuda_kernel.cu"
#include "advec_mom_kernel_z1_cuda_kernel.cu"
#include "advec_mom_kernel_x2_cuda_kernel.cu"
#include "advec_mom_kernel_y2_cuda_kernel.cu"
#include "advec_mom_kernel_x3_cuda_kernel.cu"
#include "advec_mom_kernel_z3_cuda_kernel.cu"
#include "advec_mom_kernel_mass_flux_x_cuda_kernel.cu"
#include "advec_mom_kernel_post_pre_advec_x_cuda_kernel.cu"
#include "advec_mom_kernel1_x_nonvector_cuda_kernel.cu"
#include "advec_mom_kernel2_x_cuda_kernel.cu"
#include "advec_mom_kernel_mass_flux_y_cuda_kernel.cu"
#include "advec_mom_kernel_post_pre_advec_y_cuda_kernel.cu"
#include "advec_mom_kernel1_y_nonvector_cuda_kernel.cu"
#include "advec_mom_kernel2_y_cuda_kernel.cu"
#include "advec_mom_kernel_mass_flux_z_cuda_kernel.cu"
#include "advec_mom_kernel_post_pre_advec_z_cuda_kernel.cu"
#include "advec_mom_kernel1_z_nonvector_cuda_kernel.cu"
#include "advec_mom_kernel2_z_cuda_kernel.cu"
#include "reset_field_kernel1_cuda_kernel.cu"
#include "reset_field_kernel2_cuda_kernel.cu"
|
1bd6213ae304e9e76fd7cf59a3b268d23ce88930.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fft_cuda.cuh>
__global__ void fft_cuda4_kernel(complex_t *ip, complex_t *op, int m, int size)
{
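// Iterative FFT computed in shared memory: the first pass below is a radix-4 stage on
// bit-reversed input, and the loop then applies the remaining radix-2 butterfly stages.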
__shared__ complex_t shared_op[2048];
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (4 * tid < size) {
complex_t eps = (complex_t){0.0, -1.0};
complex_t ip0 = ip[reverse(4*tid+0, m)];
complex_t ip1 = ip[reverse(4*tid+1, m)];
complex_t ip2 = ip[reverse(4*tid+2, m)];
complex_t ip3 = ip[reverse(4*tid+3, m)];
complex_t t0 = cuda_complex_add(ip0, ip1);
complex_t t1 = cuda_complex_add(ip2, ip3);
complex_t s0 = cuda_complex_sub(ip0, ip1);
complex_t s1 = cuda_complex_mult(cuda_complex_sub(ip2, ip3), eps);
shared_op[4*tid+0] = cuda_complex_add(t0, t1);
shared_op[4*tid+2] = cuda_complex_sub(t0, t1);
shared_op[4*tid+1] = cuda_complex_add(s0, s1);
shared_op[4*tid+3] = cuda_complex_sub(s0, s1);
}
__syncthreads();
for (int i = 2; i < m; i++) {
int bfly_size = (1 << (i + 1));
int bfly_half = (1 << i);
int k = tid & ((1 << i ) - 1);
int bfly_idx = tid >> i;
if (tid >= size/2) {
continue;
}
double angle = (-2.0 * PI * k / bfly_size);
complex_t omega = {cos(angle), sin(angle)};
int off = bfly_idx * bfly_size;
complex_t temp = cuda_complex_mult(omega, shared_op[off + k + bfly_half]);
shared_op[off + k + bfly_half] = cuda_complex_sub(shared_op[off + k], temp);
shared_op[off + k] = cuda_complex_add(shared_op[off + k], temp);
__syncthreads();
}
if (2 * tid < size) {
op[2*tid+0] = shared_op[2*tid+0];
op[2*tid+1] = shared_op[2*tid+1];
}
}
void fft_cuda4(complex_t *_ip, complex_t *_op, int size)
{
int m = (int)log2((double)size);
complex_t *ip = (complex_t *)_ip;
complex_t *op = (complex_t *)_op;
gpuErrchk(hipMemcpy(dev_ip, ip, size*sizeof(complex_t), hipMemcpyHostToDevice));
/* Only works for sizes up to 2048 */
int threads = (128 < size) ? 128 : size;
dim3 block(threads, 1, 1);
dim3 grid(size/threads, 1, 1);
hipLaunchKernelGGL(( fft_cuda4_kernel), dim3(grid), dim3(block), 0, 0, dev_ip, dev_op, m, size);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(op, dev_op, size*sizeof(complex_t), hipMemcpyDeviceToHost));
}
| 1bd6213ae304e9e76fd7cf59a3b268d23ce88930.cu | #include <fft_cuda.cuh>
__global__ void fft_cuda4_kernel(complex_t *ip, complex_t *op, int m, int size)
{
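// Iterative FFT computed in shared memory: the first pass below is a radix-4 stage on
// bit-reversed input, and the loop then applies the remaining radix-2 butterfly stages.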
__shared__ complex_t shared_op[2048];
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (4 * tid < size) {
complex_t eps = (complex_t){0.0, -1.0};
complex_t ip0 = ip[reverse(4*tid+0, m)];
complex_t ip1 = ip[reverse(4*tid+1, m)];
complex_t ip2 = ip[reverse(4*tid+2, m)];
complex_t ip3 = ip[reverse(4*tid+3, m)];
complex_t t0 = cuda_complex_add(ip0, ip1);
complex_t t1 = cuda_complex_add(ip2, ip3);
complex_t s0 = cuda_complex_sub(ip0, ip1);
complex_t s1 = cuda_complex_mult(cuda_complex_sub(ip2, ip3), eps);
shared_op[4*tid+0] = cuda_complex_add(t0, t1);
shared_op[4*tid+2] = cuda_complex_sub(t0, t1);
shared_op[4*tid+1] = cuda_complex_add(s0, s1);
shared_op[4*tid+3] = cuda_complex_sub(s0, s1);
}
__syncthreads();
for (int i = 2; i < m; i++) {
int bfly_size = (1 << (i + 1));
int bfly_half = (1 << i);
int k = tid & ((1 << i ) - 1);
int bfly_idx = tid >> i;
if (tid >= size/2) {
continue;
}
double angle = (-2.0 * PI * k / bfly_size);
complex_t omega = {cos(angle), sin(angle)};
int off = bfly_idx * bfly_size;
complex_t temp = cuda_complex_mult(omega, shared_op[off + k + bfly_half]);
shared_op[off + k + bfly_half] = cuda_complex_sub(shared_op[off + k], temp);
shared_op[off + k] = cuda_complex_add(shared_op[off + k], temp);
__syncthreads();
}
if (2 * tid < size) {
op[2*tid+0] = shared_op[2*tid+0];
op[2*tid+1] = shared_op[2*tid+1];
}
}
void fft_cuda4(complex_t *_ip, complex_t *_op, int size)
{
int m = (int)log2((double)size);
complex_t *ip = (complex_t *)_ip;
complex_t *op = (complex_t *)_op;
gpuErrchk(cudaMemcpy(dev_ip, ip, size*sizeof(complex_t), cudaMemcpyHostToDevice));
/* Only works for sizes up to 2048 */
int threads = (128 < size) ? 128 : size;
dim3 block(threads, 1, 1);
dim3 grid(size/threads, 1, 1);
fft_cuda4_kernel<<<grid, block>>> (dev_ip, dev_op, m, size);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(op, dev_op, size*sizeof(complex_t), cudaMemcpyDeviceToHost));
}
|
60bc23985bd32d73160d7063648812562a5ada7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDATest.hpp"
#include <ATen/hip/Exceptions.h>
namespace c10d {
namespace test {
namespace {
__global__ void waitClocks(const uint64_t count) {
// A few AMD-specific GPUs have a different clock intrinsic
#if defined(__GFX11__) && defined(USE_ROCM) && !defined(__CUDA_ARCH__)
clock_t start = wall_clock64();
#else
clock_t start = clock64();
#endif
clock_t offset = 0;
while (offset < count) {
offset = clock() - start;
}
}
} // namespace
void cudaSleep(at::hip::HIPStreamMasqueradingAsCUDA& stream, uint64_t clocks) {
hipLaunchKernelGGL(( waitClocks), dim3(1), dim3(1), 0, stream.stream(), clocks);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
int cudaNumDevices() {
int n = 0;
C10_HIP_CHECK_WARN(hipGetDeviceCount(&n));
return n;
}
} // namespace test
} // namespace c10d
| 60bc23985bd32d73160d7063648812562a5ada7b.cu | #include "CUDATest.hpp"
#include <ATen/cuda/Exceptions.h>
namespace c10d {
namespace test {
namespace {
__global__ void waitClocks(const uint64_t count) {
// A few AMD-specific GPUs have a different clock intrinsic
#if defined(__GFX11__) && defined(USE_ROCM) && !defined(__CUDA_ARCH__)
clock_t start = wall_clock64();
#else
clock_t start = clock64();
#endif
clock_t offset = 0;
while (offset < count) {
offset = clock() - start;
}
}
} // namespace
void cudaSleep(at::cuda::CUDAStream& stream, uint64_t clocks) {
waitClocks<<<1, 1, 0, stream.stream()>>>(clocks);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
int cudaNumDevices() {
int n = 0;
C10_CUDA_CHECK_WARN(cudaGetDeviceCount(&n));
return n;
}
} // namespace test
} // namespace c10d
|
dcfdaa6a1c0cd4b19821920233ed4b0f53a32d10.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define REPEAT 1
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
for(int i=0; i < REPEAT; i++)
d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
}
}
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
#define NSIZE 1048576
int
main (void) {
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
hipEvent_t start, end;
float eventEtime;
// calculate block number
nBlocks = (nsize-1) / nThreads + 1;
printf("Number of elements: %d\n", nsize);
printf("GPU execution with %d blocks each one of %d threads\n", nBlocks, nThreads);
// allocation and initialization of host buffers
h_a = (float*) malloc (nsize * sizeof(float));
h_b = (float*) malloc (nsize * sizeof(float));
h_c = (float*) malloc (nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
//-- insert CUDA code ----------------
// allocation of device buffers
//------------------------------------
// creation of cuda events: start, end
hipEventCreate(&start);
hipEventCreate(&end);
printf ("\nGPU computation ... ");
hipEventRecord(start,0);
//-- insert CUDA code ----------------
// host to device buffer copies
//------------------------------------
//-- insert CUDA code ----------------
// arrayFunc kernel launch
//------------------------------------
//-- insert CUDA code ----------------
// copy back of results from device
//------------------------------------
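// a minimal sketch of the three missing pieces (an assumed solution, not part of the
// original exercise skeleton):
// hipMalloc((void **) &d_a, nsize * sizeof(float)); (same for d_b, d_c)
// hipMemcpy(d_a, h_a, nsize * sizeof(float), hipMemcpyHostToDevice); (same for h_b -> d_b)
// hipLaunchKernelGGL(arrayFunc, dim3(nBlocks), dim3(nThreads), 0, 0, d_a, d_b, d_c, nsize);
// hipMemcpy(h_c, d_c, nsize * sizeof(float), hipMemcpyDeviceToHost);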
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
hipHostMalloc((void**)&cpuResult, nsize * sizeof(float));
hipEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
hipEventDestroy(start);
hipEventDestroy(end);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// free resources on host
free(h_a);
free(h_b);
free(h_c);
return 0;
}
void
initArrayData(float * array, float alpha, int size)
{
int i;
for (i=0; i< size; i++)
array[i] = alpha * (float) rand() / (float) RAND_MAX;
}
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
int i, j;
for (i=0; i<size; i++)
for (j=0; j<REPEAT; j++)
h_odata[i] = h_idata[i] * expf(h_jdata[i]);
}
| dcfdaa6a1c0cd4b19821920233ed4b0f53a32d10.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define REPEAT 1
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
for(int i=0; i < REPEAT; i++)
d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
}
}
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
#define NSIZE 1048576
int
main (void) {
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
cudaEvent_t start, end;
float eventEtime;
// calculate block number
nBlocks = (nsize-1) / nThreads + 1;
printf("Number of elements: %d\n", nsize);
printf("GPU execution with %d blocks each one of %d threads\n", nBlocks, nThreads);
// allocation and initialization of host buffers
h_a = (float*) malloc (nsize * sizeof(float));
h_b = (float*) malloc (nsize * sizeof(float));
h_c = (float*) malloc (nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
//-- insert CUDA code ----------------
// allocation of device buffers
//------------------------------------
// creation of cuda events: start, end
cudaEventCreate(&start);
cudaEventCreate(&end);
printf ("\nGPU computation ... ");
cudaEventRecord(start,0);
//-- insert CUDA code ----------------
// host to device buffer copies
//------------------------------------
//-- insert CUDA code ----------------
// arrayFunc kernel launch
//------------------------------------
//-- insert CUDA code ----------------
// copy back of results from device
//------------------------------------
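// a minimal sketch of the three missing pieces (an assumed solution, not part of the
// original exercise skeleton):
// cudaMalloc((void **) &d_a, nsize * sizeof(float)); (same for d_b, d_c)
// cudaMemcpy(d_a, h_a, nsize * sizeof(float), cudaMemcpyHostToDevice); (same for h_b -> d_b)
// arrayFunc<<<nBlocks, nThreads>>>(d_a, d_b, d_c, nsize);
// cudaMemcpy(h_c, d_c, nsize * sizeof(float), cudaMemcpyDeviceToHost);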
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
cudaMallocHost((void**)&cpuResult, nsize * sizeof(float));
cudaEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// free resources on host
free(h_a);
free(h_b);
free(h_c);
return 0;
}
void
initArrayData(float * array, float alpha, int size)
{
int i;
for (i=0; i< size; i++)
array[i] = alpha * (float) rand() / (float) RAND_MAX;
}
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
int i, j;
for (i=0; i<size; i++)
for (j=0; j<REPEAT; j++)
h_odata[i] = h_idata[i] * expf(h_jdata[i]);
}
|
fd0b074a0e1daf533f471d5e6566083f51cdc33d.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by ss on 16-12-14.
//
#include <sys/time.h>
#include <cfloat>
#include "multiSmoSolver.h"
#include "../svm-shared/constant.h"
#include "hip/hip_runtime.h"
#include "../svm-shared/smoGPUHelper.h"
#include "../svm-shared/HessianIO/deviceHessianOnFly.h"
#include "../SharedUtility/Timer.h"
#include "trainClassifier.h"
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "../svm-shared/devUtility.h"
#include "../svm-shared/Cache/subHessianCalculator.h"
void MultiSmoSolver::solve() {
int nrClass = problem.getNumOfClasses();
if (model.vC.size() == 0) {//initialize C for all the binary classes
model.vC = vector<float_point>(nrClass * (nrClass - 1) / 2, param.C);
}
printf("q = %d, working set size = %d\n", q, workingSetSize);
//train nrClass*(nrClass-1)/2 binary models
int k = 0;
for (int i = 0; i < nrClass; ++i) {
for (int j = i + 1; j < nrClass; ++j) {
printf("training classifier with label %d and %d\n", i, j);
SvmProblem subProblem = problem.getSubProblem(i, j);
init4Training(subProblem);
CSRMatrix subProblemMat(subProblem.v_vSamples, subProblem.getNumOfFeatures());
subProblemMat.copy2Dev(devVal, devRowPtr, devColInd, devSelfDot);
nnz = subProblemMat.getNnz();
printf("#positive ins %d, #negative ins %d\n", subProblem.count[0], subProblem.count[1]);
int totalIter = 0;
TIMER_START(trainingTimer)
for (int l = 0;; ++l) {
if (l == 0) {
selectWorkingSetAndPreCompute(subProblem, workingSetSize / 2);
} else {
selectWorkingSetAndPreCompute(subProblem, q / 2);
}
TIMER_START(iterationTimer)
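//a single block runs local SMO on the working set; the launch reserves
//3*workingSetSize + 2 floats of dynamic shared memory for its intermediate values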
localSMO<<<1, workingSetSize, workingSetSize * sizeof(float) * 3 + 2 * sizeof(float)>>>
(devLabel, devYiGValue, devAlpha, devAlphaDiff, devWorkingSet, workingSetSize, param.C, devHessianMatrixCache, subProblem.getNumOfSamples());
TIMER_STOP(iterationTimer)
TIMER_START(updateGTimer)
updateF<<<gridSize, BLOCK_SIZE>>>
(devYiGValue, devLabel, devWorkingSet, workingSetSize, devAlphaDiff, devHessianMatrixCache, subProblem.getNumOfSamples());
TIMER_STOP(updateGTimer)
float diff;
checkCudaErrors(hipMemcpyFromSymbol(&diff, devDiff, sizeof(float_point), 0, hipMemcpyDeviceToHost));
if (l % 10 == 0)
printf(".");
cout.flush();
if (diff < EPS) {
printf("\nup + low = %f\n", diff);
break;
}
}
TIMER_STOP(trainingTimer)
subProblemMat.freeDev(devVal, devRowPtr, devColInd, devSelfDot);
vector<int> svIndex;
vector<float_point> coef;
float_point rho;
extractModel(subProblem, svIndex, coef, rho);
model.addBinaryModel(subProblem, svIndex, coef, rho, i, j);
k++;
deinit4Training();
}
}
}
void MultiSmoSolver::init4Training(const SvmProblem &subProblem) {
unsigned int trainingSize = subProblem.getNumOfSamples();
checkCudaErrors(hipMalloc((void **) &devAlpha, sizeof(float_point) * trainingSize));
checkCudaErrors(hipMalloc((void **) &devYiGValue, sizeof(float_point) * trainingSize));
checkCudaErrors(hipMalloc((void **) &devLabel, sizeof(int) * trainingSize));
checkCudaErrors(hipMemset(devAlpha, 0, sizeof(float_point) * trainingSize));
vector<float_point> negatedLabel(trainingSize);
for (int i = 0; i < trainingSize; ++i) {
negatedLabel[i] = -subProblem.v_nLabels[i];
}
checkCudaErrors(hipMemcpy(devYiGValue, negatedLabel.data(), sizeof(float_point) * trainingSize,
hipMemcpyHostToDevice));
checkCudaErrors(
hipMemcpy(devLabel, subProblem.v_nLabels.data(), sizeof(int) * trainingSize, hipMemcpyHostToDevice));
InitSolver(trainingSize);//initialise base solver
checkCudaErrors(hipMalloc((void **) &devHessianMatrixCache, sizeof(float_point) * workingSetSize * trainingSize));
for (int j = 0; j < trainingSize; ++j) {
hessianDiag[j] = 1;//assume using RBF kernel
}
checkCudaErrors(
hipMemcpy(devHessianDiag, hessianDiag, sizeof(float_point) * trainingSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **) &devFValue4Sort, sizeof(float_point) * trainingSize));
checkCudaErrors(hipMalloc((void **) &devIdx4Sort, sizeof(int) * trainingSize));
}
void MultiSmoSolver::deinit4Training() {
checkCudaErrors(hipFree(devAlpha));
checkCudaErrors(hipFree(devYiGValue));
checkCudaErrors(hipFree(devLabel));
DeInitSolver();
checkCudaErrors(hipFree(devHessianMatrixCache));
checkCudaErrors(hipFree(devFValue4Sort));
checkCudaErrors(hipFree(devIdx4Sort));
}
void MultiSmoSolver::extractModel(const SvmProblem &subProblem, vector<int> &svIndex, vector<float_point> &coef,
float_point &rho) const {
const unsigned int trainingSize = subProblem.getNumOfSamples();
vector<float_point> alpha(trainingSize);
const vector<int> &label = subProblem.v_nLabels;
checkCudaErrors(hipMemcpy(alpha.data(), devAlpha, sizeof(float_point) * trainingSize, hipMemcpyDeviceToHost));
for (int i = 0; i < trainingSize; ++i) {
if (alpha[i] != 0) {
coef.push_back(label[i] * alpha[i]);
svIndex.push_back(i);
}
}
checkCudaErrors(hipMemcpyFromSymbol(&rho, devRho, sizeof(float_point), 0, hipMemcpyDeviceToHost));
printf("# of SV %lu\nbias = %f\n", svIndex.size(), rho);
}
MultiSmoSolver::MultiSmoSolver(const SvmProblem &problem, SvmModel &model, const SVMParam ¶m) :
problem(problem), model(model), param(param) {
q = 256;
workingSetSize = 512;
//workingSetSize must be 2^n and less than 1024
assert(workingSetSize <= 1024);
for (int i = 0; i < problem.getNumOfClasses(); ++i) {
assert(workingSetSize <= problem.count[i]);
}
workingSet = vector<int>(workingSetSize);
checkCudaErrors(hipMalloc((void **) &devAlphaDiff, sizeof(float_point) * workingSetSize));
checkCudaErrors(hipMalloc((void **) &devWorkingSet, sizeof(int) * workingSetSize));
}
MultiSmoSolver::~MultiSmoSolver() {
checkCudaErrors(hipFree(devAlphaDiff));
checkCudaErrors(hipFree(devWorkingSet));
}
void MultiSmoSolver::selectWorkingSetAndPreCompute(const SvmProblem &subProblem, uint numOfSelectPairs) {
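//strategy: keep the cached kernel rows of the previously selected pairs, pick the most
//violating "up"/"low" candidates by sorting the f-values, then precompute the kernel
//rows of the newly selected instances (sparse-dense product followed by the RBF map)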
uint numOfSamples = subProblem.getNumOfSamples();
uint oldSize = workingSetSize / 2 - numOfSelectPairs;
TIMER_START(selectTimer)
thrust::device_ptr<float> valuePointer = thrust::device_pointer_cast(devFValue4Sort);
thrust::device_ptr<int> indexPointer = thrust::device_pointer_cast(devIdx4Sort);
vector<int> oldWorkingSet = workingSet;
//get the q most violating pairs
getFUpValues<<<gridSize, BLOCK_SIZE>>>
(devYiGValue, devAlpha, devLabel, numOfSamples, param.C, devFValue4Sort, devIdx4Sort);
thrust::sort_by_key(valuePointer, valuePointer + numOfSamples, indexPointer, thrust::greater<float>());
checkCudaErrors(hipMemcpy(workingSet.data() + oldSize * 2, devIdx4Sort, sizeof(int) * numOfSelectPairs,
hipMemcpyDeviceToHost));
getFLowValues<<<gridSize, BLOCK_SIZE>>>
(devYiGValue, devAlpha, devLabel, numOfSamples, param.C, devFValue4Sort, devIdx4Sort);
thrust::sort_by_key(valuePointer, valuePointer + numOfSamples, indexPointer, thrust::greater<float>());
checkCudaErrors(
hipMemcpy(workingSet.data() + oldSize * 2 + numOfSelectPairs, devIdx4Sort, sizeof(int) * numOfSelectPairs,
hipMemcpyDeviceToHost));
//get pairs from last working set
for (int i = 0; i < oldSize * 2; ++i) {
workingSet[i] = oldWorkingSet[numOfSelectPairs * 2 + i];
}
checkCudaErrors(hipMemcpy(devWorkingSet, workingSet.data(), sizeof(int) * workingSetSize, hipMemcpyHostToDevice));
TIMER_STOP(selectTimer)
//move the kernel rows of the retained pairs to the front of the cache, making room for the newly selected rows
checkCudaErrors(hipMemcpy(devHessianMatrixCache,
devHessianMatrixCache + numOfSamples * numOfSelectPairs * 2,
sizeof(float_point) * numOfSamples * oldSize * 2,
hipMemcpyDeviceToDevice));
vector<vector<KeyValue> > computeSamples;
for (int i = 0; i < numOfSelectPairs * 2; ++i) {
computeSamples.push_back(subProblem.v_vSamples[workingSet[oldSize * 2 + i]]);
}
TIMER_START(preComputeTimer)
//precompute kernel values of the newly selected instances
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
CSRMatrix workingSetMat(computeSamples, subProblem.getNumOfFeatures());
float_point * devWSVal;
int *devWSColInd;
int *devWSRowPtr;
float_point * devWSSelfDot;
workingSetMat.copy2Dev(devWSVal, devWSRowPtr, devWSColInd, devWSSelfDot);
SubHessianCalculator::prepareCSRContext(handle, descr);
CSRMatrix::CSRmm2Dense(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, numOfSelectPairs * 2,
numOfSamples, subProblem.getNumOfFeatures(), descr,
workingSetMat.getNnz(), devWSVal, devWSRowPtr, devWSColInd, descr, nnz, devVal, devRowPtr,
devColInd, devHessianMatrixCache + numOfSamples * oldSize * 2);
RBFKernel<<<Ceil(numOfSelectPairs * 2 * numOfSamples, BLOCK_SIZE), BLOCK_SIZE>>>(devWSSelfDot, devSelfDot,
devHessianMatrixCache + numOfSamples * oldSize * 2, numOfSelectPairs * 2, numOfSamples, param.gamma);
SubHessianCalculator::releaseCSRContext(handle, descr);
workingSetMat.freeDev(devWSVal, devWSRowPtr, devWSColInd, devWSSelfDot);
TIMER_STOP(preComputeTimer)
}
| fd0b074a0e1daf533f471d5e6566083f51cdc33d.cu | //
// Created by ss on 16-12-14.
//
#include <sys/time.h>
#include <cfloat>
#include "multiSmoSolver.h"
#include "../svm-shared/constant.h"
#include "cuda_runtime.h"
#include "../svm-shared/smoGPUHelper.h"
#include "../svm-shared/HessianIO/deviceHessianOnFly.h"
#include "../SharedUtility/Timer.h"
#include "trainClassifier.h"
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "../svm-shared/devUtility.h"
#include "../svm-shared/Cache/subHessianCalculator.h"
void MultiSmoSolver::solve() {
int nrClass = problem.getNumOfClasses();
if (model.vC.size() == 0) {//initialize C for all the binary classes
model.vC = vector<float_point>(nrClass * (nrClass - 1) / 2, param.C);
}
printf("q = %d, working set size = %d\n", q, workingSetSize);
//train nrClass*(nrClass-1)/2 binary models
int k = 0;
for (int i = 0; i < nrClass; ++i) {
for (int j = i + 1; j < nrClass; ++j) {
printf("training classifier with label %d and %d\n", i, j);
SvmProblem subProblem = problem.getSubProblem(i, j);
init4Training(subProblem);
CSRMatrix subProblemMat(subProblem.v_vSamples, subProblem.getNumOfFeatures());
subProblemMat.copy2Dev(devVal, devRowPtr, devColInd, devSelfDot);
nnz = subProblemMat.getNnz();
printf("#positive ins %d, #negative ins %d\n", subProblem.count[0], subProblem.count[1]);
int totalIter = 0;
TIMER_START(trainingTimer)
for (int l = 0;; ++l) {
if (l == 0) {
selectWorkingSetAndPreCompute(subProblem, workingSetSize / 2);
} else {
selectWorkingSetAndPreCompute(subProblem, q / 2);
}
TIMER_START(iterationTimer)
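//a single block runs local SMO on the working set; the launch reserves
//3*workingSetSize + 2 floats of dynamic shared memory for its intermediate values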
localSMO<<<1, workingSetSize, workingSetSize * sizeof(float) * 3 + 2 * sizeof(float)>>>
(devLabel, devYiGValue, devAlpha, devAlphaDiff, devWorkingSet, workingSetSize, param.C, devHessianMatrixCache, subProblem.getNumOfSamples());
TIMER_STOP(iterationTimer)
TIMER_START(updateGTimer)
updateF<<<gridSize, BLOCK_SIZE>>>
(devYiGValue, devLabel, devWorkingSet, workingSetSize, devAlphaDiff, devHessianMatrixCache, subProblem.getNumOfSamples());
TIMER_STOP(updateGTimer)
float diff;
checkCudaErrors(cudaMemcpyFromSymbol(&diff, devDiff, sizeof(float_point), 0, cudaMemcpyDeviceToHost));
if (l % 10 == 0)
printf(".");
cout.flush();
if (diff < EPS) {
printf("\nup + low = %f\n", diff);
break;
}
}
TIMER_STOP(trainingTimer)
subProblemMat.freeDev(devVal, devRowPtr, devColInd, devSelfDot);
vector<int> svIndex;
vector<float_point> coef;
float_point rho;
extractModel(subProblem, svIndex, coef, rho);
model.addBinaryModel(subProblem, svIndex, coef, rho, i, j);
k++;
deinit4Training();
}
}
}
void MultiSmoSolver::init4Training(const SvmProblem &subProblem) {
unsigned int trainingSize = subProblem.getNumOfSamples();
checkCudaErrors(cudaMalloc((void **) &devAlpha, sizeof(float_point) * trainingSize));
checkCudaErrors(cudaMalloc((void **) &devYiGValue, sizeof(float_point) * trainingSize));
checkCudaErrors(cudaMalloc((void **) &devLabel, sizeof(int) * trainingSize));
checkCudaErrors(cudaMemset(devAlpha, 0, sizeof(float_point) * trainingSize));
vector<float_point> negatedLabel(trainingSize);
for (int i = 0; i < trainingSize; ++i) {
negatedLabel[i] = -subProblem.v_nLabels[i];
}
checkCudaErrors(cudaMemcpy(devYiGValue, negatedLabel.data(), sizeof(float_point) * trainingSize,
cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMemcpy(devLabel, subProblem.v_nLabels.data(), sizeof(int) * trainingSize, cudaMemcpyHostToDevice));
InitSolver(trainingSize);//initialise base solver
checkCudaErrors(cudaMalloc((void **) &devHessianMatrixCache, sizeof(float_point) * workingSetSize * trainingSize));
for (int j = 0; j < trainingSize; ++j) {
hessianDiag[j] = 1;//assume using RBF kernel
}
checkCudaErrors(
cudaMemcpy(devHessianDiag, hessianDiag, sizeof(float_point) * trainingSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **) &devFValue4Sort, sizeof(float_point) * trainingSize));
checkCudaErrors(cudaMalloc((void **) &devIdx4Sort, sizeof(int) * trainingSize));
}
void MultiSmoSolver::deinit4Training() {
checkCudaErrors(cudaFree(devAlpha));
checkCudaErrors(cudaFree(devYiGValue));
checkCudaErrors(cudaFree(devLabel));
DeInitSolver();
checkCudaErrors(cudaFree(devHessianMatrixCache));
checkCudaErrors(cudaFree(devFValue4Sort));
checkCudaErrors(cudaFree(devIdx4Sort));
}
void MultiSmoSolver::extractModel(const SvmProblem &subProblem, vector<int> &svIndex, vector<float_point> &coef,
float_point &rho) const {
const unsigned int trainingSize = subProblem.getNumOfSamples();
vector<float_point> alpha(trainingSize);
const vector<int> &label = subProblem.v_nLabels;
checkCudaErrors(cudaMemcpy(alpha.data(), devAlpha, sizeof(float_point) * trainingSize, cudaMemcpyDeviceToHost));
for (int i = 0; i < trainingSize; ++i) {
if (alpha[i] != 0) {
coef.push_back(label[i] * alpha[i]);
svIndex.push_back(i);
}
}
checkCudaErrors(cudaMemcpyFromSymbol(&rho, devRho, sizeof(float_point), 0, cudaMemcpyDeviceToHost));
printf("# of SV %lu\nbias = %f\n", svIndex.size(), rho);
}
MultiSmoSolver::MultiSmoSolver(const SvmProblem &problem, SvmModel &model, const SVMParam ¶m) :
problem(problem), model(model), param(param) {
q = 256;
workingSetSize = 512;
//workingSetSize must be a power of two (2^n) and at most 1024
assert(workingSetSize <= 1024);
for (int i = 0; i < problem.getNumOfClasses(); ++i) {
assert(workingSetSize <= problem.count[i]);
}
workingSet = vector<int>(workingSetSize);
checkCudaErrors(cudaMalloc((void **) &devAlphaDiff, sizeof(float_point) * workingSetSize));
checkCudaErrors(cudaMalloc((void **) &devWorkingSet, sizeof(int) * workingSetSize));
}
MultiSmoSolver::~MultiSmoSolver() {
checkCudaErrors(cudaFree(devAlphaDiff));
checkCudaErrors(cudaFree(devWorkingSet));
}
void MultiSmoSolver::selectWorkingSetAndPreCompute(const SvmProblem &subProblem, uint numOfSelectPairs) {
uint numOfSamples = subProblem.getNumOfSamples();
uint oldSize = workingSetSize / 2 - numOfSelectPairs;
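//the working set keeps oldSize*2 indices from the previous iteration and appends numOfSelectPairs indices from the "up" side plus numOfSelectPairs from the "low" side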
TIMER_START(selectTimer)
thrust::device_ptr<float> valuePointer = thrust::device_pointer_cast(devFValue4Sort);
thrust::device_ptr<int> indexPointer = thrust::device_pointer_cast(devIdx4Sort);
vector<int> oldWorkingSet = workingSet;
//get the q most violating pairs
getFUpValues << < gridSize, BLOCK_SIZE >> >
(devYiGValue, devAlpha, devLabel, numOfSamples, param.C, devFValue4Sort, devIdx4Sort);
thrust::sort_by_key(valuePointer, valuePointer + numOfSamples, indexPointer, thrust::greater<float>());
checkCudaErrors(cudaMemcpy(workingSet.data() + oldSize * 2, devIdx4Sort, sizeof(int) * numOfSelectPairs,
cudaMemcpyDeviceToHost));
getFLowValues << < gridSize, BLOCK_SIZE >> >
(devYiGValue, devAlpha, devLabel, numOfSamples, param.C, devFValue4Sort, devIdx4Sort);
thrust::sort_by_key(valuePointer, valuePointer + numOfSamples, indexPointer, thrust::greater<float>());
checkCudaErrors(
cudaMemcpy(workingSet.data() + oldSize * 2 + numOfSelectPairs, devIdx4Sort, sizeof(int) * numOfSelectPairs,
cudaMemcpyDeviceToHost));
//get pairs from last working set
for (int i = 0; i < oldSize * 2; ++i) {
workingSet[i] = oldWorkingSet[numOfSelectPairs * 2 + i];
}
checkCudaErrors(cudaMemcpy(devWorkingSet, workingSet.data(), sizeof(int) * workingSetSize, cudaMemcpyHostToDevice));
TIMER_STOP(selectTimer)
//move the kept kernel rows to the front of the cache to make room for the new ones
checkCudaErrors(cudaMemcpy(devHessianMatrixCache,
devHessianMatrixCache + numOfSamples * numOfSelectPairs * 2,
sizeof(float_point) * numOfSamples * oldSize * 2,
cudaMemcpyDeviceToDevice));
vector<vector<KeyValue> > computeSamples;
for (int i = 0; i < numOfSelectPairs * 2; ++i) {
computeSamples.push_back(subProblem.v_vSamples[workingSet[oldSize * 2 + i]]);
}
TIMER_START(preComputeTimer)
//precompute kernel values of the newly selected instances
cusparseHandle_t handle;
cusparseMatDescr_t descr;
CSRMatrix workingSetMat(computeSamples, subProblem.getNumOfFeatures());
float_point * devWSVal;
int *devWSColInd;
int *devWSRowPtr;
float_point * devWSSelfDot;
workingSetMat.copy2Dev(devWSVal, devWSRowPtr, devWSColInd, devWSSelfDot);
SubHessianCalculator::prepareCSRContext(handle, descr);
CSRMatrix::CSRmm2Dense(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, numOfSelectPairs * 2,
numOfSamples, subProblem.getNumOfFeatures(), descr,
workingSetMat.getNnz(), devWSVal, devWSRowPtr, devWSColInd, descr, nnz, devVal, devRowPtr,
devColInd, devHessianMatrixCache + numOfSamples * oldSize * 2);
RBFKernel << < Ceil(numOfSelectPairs * 2 * numOfSamples, BLOCK_SIZE), BLOCK_SIZE >> > (devWSSelfDot, devSelfDot,
devHessianMatrixCache + numOfSamples * oldSize * 2, numOfSelectPairs * 2, numOfSamples, param.gamma);
SubHessianCalculator::releaseCSRContext(handle, descr);
workingSetMat.freeDev(devWSVal, devWSRowPtr, devWSColInd, devWSSelfDot);
TIMER_STOP(preComputeTimer)
}
|
8671ce4e241ca02f921e2fc779373c3e20d29a69.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
#include <iostream> // testing
#include <cassert> // for assert()
#define SECTION_SIZE 1024
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void convertFromInclusiveToExclusive(const int* inputArray,
int* outputArray, int inputSize)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// convert inclusive scan into exclusive scan by shifting
// all elements to the right by one position and fill the first
// element and out-of-bound elements with 0.
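// e.g. an inclusive scan result [3, 7, 12, 20] becomes the exclusive scan [0, 3, 7, 12].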
if (i < inputSize && i != 0)
{
outputArray[i] = inputArray[i - 1];
}
else {
outputArray[i] = 0;
}
}
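// adds the scanned block total S[blockIdx.x - 1] to every element of block blockIdx.x, turning the per-block scans into a global scan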
__global__ void kernKoggeStoneScanAddUpSumArray(const int* S,
int* Y, int inputSize)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < inputSize && blockIdx.x > 0)
{
Y[i] += S[blockIdx.x - 1];
}
}
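// Kogge-Stone inclusive scan of one block in shared memory; the last thread also writes the block total to S[blockIdx.x] so the block sums can be scanned recursively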
__global__ void kernKoggeStoneScan(int* X, int* Y, int* S, int inputSize)
{
__shared__ int XY[SECTION_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < inputSize)
{
XY[threadIdx.x] = X[i];
}
else {
XY[threadIdx.x] = 0;
}
// performs iterative scan on XY
// note that it is stride < blockDim.x, not stride <= blockDim.x:
// if you have 16 elements, stride could only be 1,2,4,8
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
{
// make sure that input is in place
__syncthreads();
bool written = false;
int temp = 0;
if (threadIdx.x >= stride)
{
temp = XY[threadIdx.x] + XY[threadIdx.x - stride];
written = true;
}
// make sure previous output has been consumed
__syncthreads();
if (written)
{
XY[threadIdx.x] = temp;
}
}
Y[i] = XY[threadIdx.x];
// the last thread in the block should write the output value of
// the last XY element in the block to the blockIdx.x position of
// SumArray
// make sure XY[sectionSize - 1] has the correct partial sum
__syncthreads();
if (threadIdx.x == blockDim.x - 1)
{
S[blockIdx.x] = XY[SECTION_SIZE - 1];
}
}
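// overload without a block-sum array, used when a single block covers the whole input at the bottom of the recursion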
__global__ void kernKoggeStoneScan(int* X, int* Y, int inputSize)
{
__shared__ int XY[SECTION_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < inputSize)
{
XY[threadIdx.x] = X[i];
}
else {
XY[threadIdx.x] = 0;
}
// performs iterative scan on XY
// note that it is stride < blockDim.x, not stride <= blockDim.x:
// if you have 16 elements, stride could only be 1,2,4,8
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
{
// make sure that input is in place
__syncthreads();
bool written = false;
int temp = 0;
if (threadIdx.x >= stride)
{
temp = XY[threadIdx.x] + XY[threadIdx.x - stride];
written = true;
}
// make sure previous output has been consumed
__syncthreads();
if (written)
{
XY[threadIdx.x] = temp;
}
}
Y[i] = XY[threadIdx.x];
}
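// recursive inclusive scan: scan each block, scan the array of block sums, then add those prefixes back onto the blocks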
void scanRecursiveHelper(int n, int* odata, const int* idata)
{
int blockSize = (n + SECTION_SIZE - 1) / SECTION_SIZE;
int idataSizeBytes = n * sizeof(int);
int sumArraySizeBytes = n <= 1024 ? n * sizeof(int)
: (n / SECTION_SIZE) * sizeof(int);
dim3 dimGridKogge(blockSize, 1, 1);
dim3 dimBlockKogge(SECTION_SIZE, 1, 1);
if (blockSize == 1)
{
int* d_X;
int* d_Y;
hipMalloc((void**)&d_X, idataSizeBytes);
checkCUDAError("hipMalloc d_X failed!");
hipMalloc((void**)&d_Y, idataSizeBytes);
checkCUDAError("hipMalloc d_Y failed!");
hipMemcpy(d_X, idata, idataSizeBytes, hipMemcpyHostToDevice);
kernKoggeStoneScan << <dimGridKogge, dimBlockKogge >> > (
d_X, d_Y, n);
hipMemcpy(odata, d_Y, idataSizeBytes, hipMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
#if 0
std::cout << '\n';
for (int i = 0; i < n; i++)
{
std::cout << odata[i] << '\n';
}
#endif
hipFree(d_X);
hipFree(d_Y);
checkCUDAError("hipFree failed!");
}
else {
int* d_X;
int* d_Y;
hipMalloc((void**)&d_X, idataSizeBytes);
checkCUDAError("hipMalloc d_X failed!");
hipMalloc((void**)&d_Y, idataSizeBytes);
checkCUDAError("hipMalloc d_Y failed!");
int* d_S;
int* d_SOut;
hipMalloc((void**)&d_S, sumArraySizeBytes);
checkCUDAError("hipMalloc d_S failed!");
hipMalloc((void**)&d_SOut, sumArraySizeBytes);
checkCUDAError("hipMalloc d_SOut failed!");
hipMemcpy(d_X, idata, idataSizeBytes, hipMemcpyHostToDevice);
checkCUDAError("memCpy back failed!");
kernKoggeStoneScan << <dimGridKogge, dimBlockKogge >> > (d_X, d_Y, d_S, n);
scanRecursiveHelper(n / SECTION_SIZE, d_SOut, d_S);
kernKoggeStoneScanAddUpSumArray << <dimGridKogge, dimBlockKogge >> > (
d_SOut, d_Y, n);
hipMemcpy(odata, d_Y, idataSizeBytes, hipMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
hipFree(d_X);
hipFree(d_Y);
hipFree(d_S);
hipFree(d_SOut);
checkCUDAError("hipFree failed!");
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// n could be larger than SECTION_SIZE
int idataSizeBytes = n * sizeof(int);
int sumArraySizeBytes = (n / SECTION_SIZE) * sizeof(int);
dim3 dimGridKogge((n + SECTION_SIZE - 1) / SECTION_SIZE, 1, 1);
dim3 dimBlockKogge(SECTION_SIZE, 1, 1);
int* sumArrayOutput = new int[n / SECTION_SIZE];
int* d_X;
int* d_Y;
int* d_S;
int* d_SOut;
int* d_YExclusive;
hipMalloc((void**)&d_X, idataSizeBytes);
checkCUDAError("hipMalloc d_X failed!");
hipMalloc((void**)&d_Y, idataSizeBytes);
checkCUDAError("hipMalloc d_Y failed!");
hipMalloc((void**)&d_YExclusive, idataSizeBytes);
checkCUDAError("hipMalloc d_YExclusive failed!");
hipMalloc((void**)&d_S, sumArraySizeBytes);
checkCUDAError("hipMalloc d_S failed!");
hipMalloc((void**)&d_SOut, sumArraySizeBytes);
checkCUDAError("hipMalloc d_SOut failed!");
hipMemcpy(d_X, idata, idataSizeBytes, hipMemcpyHostToDevice);
timer().startGpuTimer();
hipLaunchKernelGGL(( kernKoggeStoneScan) , dim3(dimGridKogge), dim3(dimBlockKogge) , 0, 0, d_X, d_Y, d_S, n);
scanRecursiveHelper(n / SECTION_SIZE, d_SOut, d_S);
hipLaunchKernelGGL(( kernKoggeStoneScanAddUpSumArray) , dim3(dimGridKogge), dim3(dimBlockKogge) , 0, 0,
d_SOut, d_Y, n);
convertFromInclusiveToExclusive << <dimGridKogge, dimBlockKogge >> > (
d_Y, d_YExclusive, n);
timer().endGpuTimer();
hipMemcpy(odata, d_YExclusive, idataSizeBytes, hipMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
hipFree(d_X);
hipFree(d_Y);
hipFree(d_S);
hipFree(d_SOut);
hipFree(d_YExclusive);
checkCUDAError("hipFree failed!");
}
}
}
#if 0
void unitTestConversion()
{
// for testing
int numObject = 8;
int size = numObject * sizeof(int);
int* toyExclusiveArray = new int[numObject];
int* toyInclusiveArray = new int[numObject] {3, 4, 11, 11, 15, 16, 22, 25};
int* dev_toyExclusiveArray;
int* dev_toyInclusiveArray;
hipMalloc((void**)&dev_toyExclusiveArray, size);
checkCUDAError("hipMalloc dev_toyExclusiveArray failed!");
hipMalloc((void**)&dev_toyInclusiveArray, size);
checkCUDAError("hipMalloc dev_toyInclusiveArray failed!");
hipMemcpy(dev_toyInclusiveArray, toyInclusiveArray, size,
hipMemcpyHostToDevice);
dim3 dimGridArray((numObject + blockSize - 1) / blockSize, 1, 1);
dim3 dimBlockArray(blockSize, 1, 1);
convertFromInclusiveToExclusive << <dimGridArray, dimBlockArray >> > (
dev_toyInclusiveArray, dev_toyExclusiveArray, numObject);
hipMemcpy(toyExclusiveArray, dev_toyExclusiveArray, size,
hipMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
printf("\n");
for (int i = 0; i < numObject; i++)
{
std::cout << toyExclusiveArray[i] << '\n';
}
printf("\n");
}
#endif | 8671ce4e241ca02f921e2fc779373c3e20d29a69.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
#include <iostream> // testing
#include <cassert> // for assert()
#define SECTION_SIZE 1024
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void convertFromInclusiveToExclusive(const int* inputArray,
int* outputArray, int inputSize)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// convert inclusive scan into exclusive scan by shifting
// all elements to the right by one position and fill the first
// element and out-of-bound elements with 0.
if (i < inputSize && i != 0)
{
outputArray[i] = inputArray[i - 1];
}
else {
outputArray[i] = 0;
}
}
__global__ void kernKoggeStoneScanAddUpSumArray(const int* S,
int* Y, int inputSize)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < inputSize && blockIdx.x > 0)
{
Y[i] += S[blockIdx.x - 1];
}
}
__global__ void kernKoggeStoneScan(int* X, int* Y, int* S, int inputSize)
{
__shared__ int XY[SECTION_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < inputSize)
{
XY[threadIdx.x] = X[i];
}
else {
XY[threadIdx.x] = 0;
}
// performs iterative scan on XY
// note that it is stride < blockDim.x, not stride <= blockDim.x:
// if you have 16 elements, stride could only be 1,2,4,8
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
{
// make sure that input is in place
__syncthreads();
bool written = false;
int temp = 0;
if (threadIdx.x >= stride)
{
temp = XY[threadIdx.x] + XY[threadIdx.x - stride];
written = true;
}
// make sure previous output has been consumed
__syncthreads();
if (written)
{
XY[threadIdx.x] = temp;
}
}
Y[i] = XY[threadIdx.x];
// the last thread in the block should write the output value of
// the last XY element in the block to the blockIdx.x position of
// SumArray
// make sure XY[sectionSize - 1] has the correct partial sum
__syncthreads();
if (threadIdx.x == blockDim.x - 1)
{
S[blockIdx.x] = XY[SECTION_SIZE - 1];
}
}
__global__ void kernKoggeStoneScan(int* X, int* Y, int inputSize)
{
__shared__ int XY[SECTION_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < inputSize)
{
XY[threadIdx.x] = X[i];
}
else {
XY[threadIdx.x] = 0;
}
// performs iterative scan on XY
// note that it is stride < blockDim.x, not stride <= blockDim.x:
// if you have 16 elements, stride could only be 1,2,4,8
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
{
// make sure that input is in place
__syncthreads();
bool written = false;
int temp = 0;
if (threadIdx.x >= stride)
{
temp = XY[threadIdx.x] + XY[threadIdx.x - stride];
written = true;
}
// make sure previous output has been consumed
__syncthreads();
if (written)
{
XY[threadIdx.x] = temp;
}
}
Y[i] = XY[threadIdx.x];
}
void scanRecursiveHelper(int n, int* odata, const int* idata)
{
int blockSize = (n + SECTION_SIZE - 1) / SECTION_SIZE;
int idataSizeBytes = n * sizeof(int);
int sumArraySizeBytes = n <= 1024 ? n * sizeof(int)
: (n / SECTION_SIZE) * sizeof(int);
dim3 dimGridKogge(blockSize, 1, 1);
dim3 dimBlockKogge(SECTION_SIZE, 1, 1);
if (blockSize == 1)
{
int* d_X;
int* d_Y;
cudaMalloc((void**)&d_X, idataSizeBytes);
checkCUDAError("cudaMalloc d_X failed!");
cudaMalloc((void**)&d_Y, idataSizeBytes);
checkCUDAError("cudaMalloc d_Y failed!");
cudaMemcpy(d_X, idata, idataSizeBytes, cudaMemcpyHostToDevice);
kernKoggeStoneScan << <dimGridKogge, dimBlockKogge >> > (
d_X, d_Y, n);
cudaMemcpy(odata, d_Y, idataSizeBytes, cudaMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
#if 0
std::cout << '\n';
for (int i = 0; i < n; i++)
{
std::cout << odata[i] << '\n';
}
#endif
cudaFree(d_X);
cudaFree(d_Y);
checkCUDAError("cudaFree failed!");
}
else {
int* d_X;
int* d_Y;
cudaMalloc((void**)&d_X, idataSizeBytes);
checkCUDAError("cudaMalloc d_X failed!");
cudaMalloc((void**)&d_Y, idataSizeBytes);
checkCUDAError("cudaMalloc d_Y failed!");
int* d_S;
int* d_SOut;
cudaMalloc((void**)&d_S, sumArraySizeBytes);
checkCUDAError("cudaMalloc d_S failed!");
cudaMalloc((void**)&d_SOut, sumArraySizeBytes);
checkCUDAError("cudaMalloc d_SOut failed!");
cudaMemcpy(d_X, idata, idataSizeBytes, cudaMemcpyHostToDevice);
checkCUDAError("memCpy back failed!");
kernKoggeStoneScan << <dimGridKogge, dimBlockKogge >> > (d_X, d_Y, d_S, n);
scanRecursiveHelper(n / SECTION_SIZE, d_SOut, d_S);
kernKoggeStoneScanAddUpSumArray << <dimGridKogge, dimBlockKogge >> > (
d_SOut, d_Y, n);
cudaMemcpy(odata, d_Y, idataSizeBytes, cudaMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_S);
cudaFree(d_SOut);
checkCUDAError("cudaFree failed!");
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// n could be larger than SECTION_SIZE
int idataSizeBytes = n * sizeof(int);
int sumArraySizeBytes = (n / SECTION_SIZE) * sizeof(int);
dim3 dimGridKogge((n + SECTION_SIZE - 1) / SECTION_SIZE, 1, 1);
dim3 dimBlockKogge(SECTION_SIZE, 1, 1);
int* sumArrayOutput = new int[n / SECTION_SIZE];
int* d_X;
int* d_Y;
int* d_S;
int* d_SOut;
int* d_YExclusive;
cudaMalloc((void**)&d_X, idataSizeBytes);
checkCUDAError("cudaMalloc d_X failed!");
cudaMalloc((void**)&d_Y, idataSizeBytes);
checkCUDAError("cudaMalloc d_Y failed!");
cudaMalloc((void**)&d_YExclusive, idataSizeBytes);
checkCUDAError("cudaMalloc d_YExclusive failed!");
cudaMalloc((void**)&d_S, sumArraySizeBytes);
checkCUDAError("cudaMalloc d_S failed!");
cudaMalloc((void**)&d_SOut, sumArraySizeBytes);
checkCUDAError("cudaMalloc d_SOut failed!");
cudaMemcpy(d_X, idata, idataSizeBytes, cudaMemcpyHostToDevice);
timer().startGpuTimer();
kernKoggeStoneScan <<<dimGridKogge, dimBlockKogge >>> (d_X, d_Y, d_S, n);
scanRecursiveHelper(n / SECTION_SIZE, d_SOut, d_S);
kernKoggeStoneScanAddUpSumArray <<<dimGridKogge, dimBlockKogge >>> (
d_SOut, d_Y, n);
convertFromInclusiveToExclusive << <dimGridKogge, dimBlockKogge >> > (
d_Y, d_YExclusive, n);
timer().endGpuTimer();
cudaMemcpy(odata, d_YExclusive, idataSizeBytes, cudaMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_S);
cudaFree(d_SOut);
cudaFree(d_YExclusive);
checkCUDAError("cudaFree failed!");
}
}
}
#if 0
void unitTestConversion()
{
// for testing
int numObject = 8;
int size = numObject * sizeof(int);
int* toyExclusiveArray = new int[numObject];
int* toyInclusiveArray = new int[numObject] {3, 4, 11, 11, 15, 16, 22, 25};
int* dev_toyExclusiveArray;
int* dev_toyInclusiveArray;
cudaMalloc((void**)&dev_toyExclusiveArray, size);
checkCUDAError("cudaMalloc dev_toyExclusiveArray failed!");
cudaMalloc((void**)&dev_toyInclusiveArray, size);
checkCUDAError("cudaMalloc dev_toyInclusiveArray failed!");
cudaMemcpy(dev_toyInclusiveArray, toyInclusiveArray, size,
cudaMemcpyHostToDevice);
dim3 dimGridArray((numObject + blockSize - 1) / blockSize, 1, 1);
dim3 dimBlockArray(blockSize, 1, 1);
convertFromInclusiveToExclusive << <dimGridArray, dimBlockArray >> > (
dev_toyInclusiveArray, dev_toyExclusiveArray, numObject);
cudaMemcpy(toyExclusiveArray, dev_toyExclusiveArray, size,
cudaMemcpyDeviceToHost);
checkCUDAError("memCpy back failed!");
printf("\n");
for (int i = 0; i < numObject; i++)
{
std::cout << toyExclusiveArray[i] << '\n';
}
printf("\n");
}
#endif |
5375d40d3c10ffa1b375dc06a453a51b273a4703.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `min`
#include <cudf/detail/reduction_functions.hpp>
#include "simple_hip.cuh"
std::unique_ptr<cudf::scalar> cudf::reduction::min(column_view const& col,
data_type const output_dtype,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
using reducer = cudf::reduction::simple::element_type_dispatcher<cudf::reduction::op::min>;
return cudf::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
| 5375d40d3c10ffa1b375dc06a453a51b273a4703.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `min`
#include <cudf/detail/reduction_functions.hpp>
#include "simple.cuh"
std::unique_ptr<cudf::scalar> cudf::reduction::min(column_view const& col,
data_type const output_dtype,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
using reducer = cudf::reduction::simple::element_type_dispatcher<cudf::reduction::op::min>;
return cudf::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
|
9d30a2ac11ae8fff8d380fea3ced3f4c5bea3500.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <time.h>
#include <hip/hip_runtime.h>
#include "customer_functions.h"
#include "cudaEGL.h"
#include "iva_metadata.h"
unsigned long counter = 0L;
/**
* Dummy custom pre-process API implementation.
* It just accesses the mapped surface userspace pointers &
* memsets them with a specific pattern, modifying pixel-data in-place.
*
* @param sBaseAddr : Mapped Surfaces pointers
* @param smemsize : surfaces size array
* @param swidth : surfaces width array
* @param sheight : surfaces height array
* @param spitch : surfaces pitch array
* @param nsurfcount : surfaces count
*/
static void
pre_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
return;
}
/**
* Dummy custom post-process API implementation.
* It just accesses the mapped surface userspace pointers &
* memsets them with a specific pattern, modifying pixel-data in-place.
*
* @param sBaseAddr : Mapped Surfaces pointers
* @param smemsize : surfaces size array
* @param swidth : surfaces width array
* @param sheight : surfaces height array
* @param spitch : surfaces pitch array
* @param nsurfcount : surfaces count
*/
static void
post_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
counter++;
if ( counter % 30 == 0 ) {
unsigned long long parsedTimestamp = 0LL;
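// decode the 64-bit timestamp that gpu_process drew into the luma plane: each bit occupies a 4-pixel-wide cell starting at (100,100), and a dark sample means that bit is set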
if (sformat[0] == COLOR_FORMAT_Y8) {
for ( int I = 0; I < 64; I++ ) {
char * pixelPtr = (char *)sBaseAddr[0] + (100 * spitch[0]) + 100 + (4 * I);
if ( *pixelPtr < 128 ) {
parsedTimestamp |= (0x1LL << (63 - I));
}
}
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
auto duration = now.time_since_epoch();
unsigned long long micros = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
printf("HUH Thirty frames: %llu %llu %lld\n", micros, parsedTimestamp, (long long)(micros - parsedTimestamp));
}
}
return;
}
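// encodes the 64-bit timestamp into the frame: each bit becomes a 4x4 block of luma pixels starting at (100,100) (white for 0, black for 1), and the corresponding chroma rows are set to neutral 128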
__global__ void addTimestampOverlayKernel(int * pYPlanePtr, int * pUvPlanePtr, int pitch, unsigned long long ts) {
int bitNumber = 63 - (( threadIdx.y & 0x000000FF ) >> 2);
bool bitval = (( 1LL << bitNumber ) & ts ) == 0;
if ( threadIdx.x <= 3 ) {
int row = threadIdx.x + 100;
int column = threadIdx.y + 100;
char * pYpixel = (char*)pYPlanePtr + (row * pitch) + column;
if ( bitval ) *pYpixel = 255; else *pYpixel = 0;
}
if ( threadIdx.x == 0 ) {
int row = 50;
int column = threadIdx.y + 100;
char * pUvpixel = (char*)pUvPlanePtr + (row * pitch) + column;
*pUvpixel = 128;
}
if ( threadIdx.x == 1 ) {
int row = 51;
int column = threadIdx.y + 100;
char * pUvpixel = (char*)pUvPlanePtr + (row * pitch) + column;
*pUvpixel = 128;
}
return;
}
static int addTimestampOverlay(hipDeviceptr_t pYPlanePtr, hipDeviceptr_t pUvPlanePtr, int pitch){
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
auto duration = now.time_since_epoch();
unsigned long long micros = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
//printf("Thirty frames: %llu\n", micros);
dim3 threadsPerBlock(4,256);
dim3 blocks(1,1);
hipLaunchKernelGGL(( addTimestampOverlayKernel), dim3(blocks),dim3(threadsPerBlock), 0, 0, (int*)pYPlanePtr, (int*)pUvPlanePtr, pitch, micros);
return 0;
}
/**
* Performs CUDA Operations on egl image.
*
* @param image : EGL image
*/
static void
gpu_process (EGLImageKHR image, void ** usrptr)
{
hipError_t status;
CUeglFrame eglFrame;
hipGraphicsResource_t pResource = NULL;
counter++;
//if ( counter % 30 != 0 ) {
// return;
//}
hipFree(0);
status = hipGraphicsEGLRegisterImage(&pResource, image, hipGraphicsMapFlagsNone);
if (status != hipSuccess) {
printf("hipGraphicsEGLRegisterImage failed : %d \n", status);
return;
}
status = hipGraphicsResourceGetMappedEglFrame( &eglFrame, pResource, 0, 0);
if (status != hipSuccess) {
printf ("hipGraphicsSubResourceGetMappedArray failed\n");
}
status = hipCtxSynchronize();
if (status != hipSuccess) {
printf ("hipCtxSynchronize failed \n");
}
if (eglFrame.frameType == CU_EGL_FRAME_TYPE_PITCH) {
if (eglFrame.eglColorFormat == CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR) {
addTimestampOverlay((hipDeviceptr_t) eglFrame.frame.pPitch[0], (hipDeviceptr_t) eglFrame.frame.pPitch[1], eglFrame.pitch);
} else {
printf ("Invalid eglcolorformat\n");
}
} else {
printf("Invalid frame type!!\n");
}
{
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
}
status = hipCtxSynchronize();
if (status != hipSuccess) {
printf ("hipCtxSynchronize failed after memcpy \n");
}
status = hipGraphicsUnregisterResource(pResource);
if (status != hipSuccess) {
printf("cuGraphicsEGLUnRegisterResource failed: %d \n", status);
}
if ( counter % 30 == 0 ) {
std::chrono::time_point<std::chrono::system_clock> nowAfterOverlay = std::chrono::system_clock::now();
auto durationAfterOverlay = nowAfterOverlay.time_since_epoch();
unsigned long long microsAfterOverlay = std::chrono::duration_cast<std::chrono::milliseconds>(durationAfterOverlay).count();
printf("Micros after overlay: %llu\n", microsAfterOverlay);
}
}
extern "C" void
init (CustomerFunction * pFuncs)
{
pFuncs->fPreProcess = pre_process;
pFuncs->fGPUProcess = gpu_process;
pFuncs->fPostProcess = post_process;
printf("libnvcuda_timestamp_overlay.so::init(): The video timestamp processing library has been initialized.\n");
}
extern "C" void
deinit (void)
{
/* deinitialization */
}
| 9d30a2ac11ae8fff8d380fea3ced3f4c5bea3500.cu | /*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <time.h>
#include <cuda.h>
#include "customer_functions.h"
#include "cudaEGL.h"
#include "iva_metadata.h"
unsigned long counter = 0L;
/**
* Dummy custom pre-process API implementation.
* It just accesses the mapped surface userspace pointers &
* memsets them with a specific pattern, modifying pixel-data in-place.
*
* @param sBaseAddr : Mapped Surfaces pointers
* @param smemsize : surfaces size array
* @param swidth : surfaces width array
* @param sheight : surfaces height array
* @param spitch : surfaces pitch array
* @param nsurfcount : surfaces count
*/
static void
pre_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
return;
}
/**
* Dummy custom post-process API implementation.
* It just accesses the mapped surface userspace pointers &
* memsets them with a specific pattern, modifying pixel-data in-place.
*
* @param sBaseAddr : Mapped Surfaces pointers
* @param smemsize : surfaces size array
* @param swidth : surfaces width array
* @param sheight : surfaces height array
* @param spitch : surfaces pitch array
* @param nsurfcount : surfaces count
*/
static void
post_process (void **sBaseAddr,
unsigned int *smemsize,
unsigned int *swidth,
unsigned int *sheight,
unsigned int *spitch,
ColorFormat *sformat,
unsigned int nsurfcount,
void ** usrptr)
{
counter++;
if ( counter % 30 == 0 ) {
unsigned long long parsedTimestamp = 0LL;
if (sformat[0] == COLOR_FORMAT_Y8) {
for ( int I = 0; I < 64; I++ ) {
char * pixelPtr = (char *)sBaseAddr[0] + (100 * spitch[0]) + 100 + (4 * I);
if ( *pixelPtr < 128 ) {
parsedTimestamp |= (0x1LL << (63 - I));
}
}
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
auto duration = now.time_since_epoch();
unsigned long long micros = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
printf("HUH Thirty frames: %llu %llu %lld\n", micros, parsedTimestamp, (long long)(micros - parsedTimestamp));
}
}
return;
}
__global__ void addTimestampOverlayKernel(int * pYPlanePtr, int * pUvPlanePtr, int pitch, unsigned long long ts) {
int bitNumber = 63 - (( threadIdx.y & 0x000000FF ) >> 2);
bool bitval = (( 1LL << bitNumber ) & ts ) == 0;
if ( threadIdx.x <= 3 ) {
int row = threadIdx.x + 100;
int column = threadIdx.y + 100;
char * pYpixel = (char*)pYPlanePtr + (row * pitch) + column;
if ( bitval ) *pYpixel = 255; else *pYpixel = 0;
}
if ( threadIdx.x == 0 ) {
int row = 50;
int column = threadIdx.y + 100;
char * pUvpixel = (char*)pUvPlanePtr + (row * pitch) + column;
*pUvpixel = 128;
}
if ( threadIdx.x == 1 ) {
int row = 51;
int column = threadIdx.y + 100;
char * pUvpixel = (char*)pUvPlanePtr + (row * pitch) + column;
*pUvpixel = 128;
}
return;
}
static int addTimestampOverlay(CUdeviceptr pYPlanePtr, CUdeviceptr pUvPlanePtr, int pitch){
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
auto duration = now.time_since_epoch();
unsigned long long micros = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
//printf("Thirty frames: %llu\n", micros);
dim3 threadsPerBlock(4,256);
dim3 blocks(1,1);
addTimestampOverlayKernel<<<blocks,threadsPerBlock>>>((int*)pYPlanePtr, (int*)pUvPlanePtr, pitch, micros);
return 0;
}
/**
* Performs CUDA Operations on egl image.
*
* @param image : EGL image
*/
static void
gpu_process (EGLImageKHR image, void ** usrptr)
{
CUresult status;
CUeglFrame eglFrame;
CUgraphicsResource pResource = NULL;
counter++;
//if ( counter % 30 != 0 ) {
// return;
//}
cudaFree(0);
status = cuGraphicsEGLRegisterImage(&pResource, image, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
if (status != CUDA_SUCCESS) {
printf("cuGraphicsEGLRegisterImage failed : %d \n", status);
return;
}
status = cuGraphicsResourceGetMappedEglFrame( &eglFrame, pResource, 0, 0);
if (status != CUDA_SUCCESS) {
printf ("cuGraphicsSubResourceGetMappedArray failed\n");
}
status = cuCtxSynchronize();
if (status != CUDA_SUCCESS) {
printf ("cuCtxSynchronize failed \n");
}
if (eglFrame.frameType == CU_EGL_FRAME_TYPE_PITCH) {
if (eglFrame.eglColorFormat == CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR) {
addTimestampOverlay((CUdeviceptr) eglFrame.frame.pPitch[0], (CUdeviceptr) eglFrame.frame.pPitch[1], eglFrame.pitch);
} else {
printf ("Invalid eglcolorformat\n");
}
} else {
printf("Invalid frame type!!\n");
}
{
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
}
status = cuCtxSynchronize();
if (status != CUDA_SUCCESS) {
printf ("cuCtxSynchronize failed after memcpy \n");
}
status = cuGraphicsUnregisterResource(pResource);
if (status != CUDA_SUCCESS) {
printf("cuGraphicsEGLUnRegisterResource failed: %d \n", status);
}
if ( counter % 30 == 0 ) {
std::chrono::time_point<std::chrono::system_clock> nowAfterOverlay = std::chrono::system_clock::now();
auto durationAfterOverlay = nowAfterOverlay.time_since_epoch();
unsigned long long microsAfterOverlay = std::chrono::duration_cast<std::chrono::milliseconds>(durationAfterOverlay).count();
printf("Micros after overlay: %llu\n", microsAfterOverlay);
}
}
extern "C" void
init (CustomerFunction * pFuncs)
{
pFuncs->fPreProcess = pre_process;
pFuncs->fGPUProcess = gpu_process;
pFuncs->fPostProcess = post_process;
printf("libnvcuda_timestamp_overlay.so::init(): The video timestamp processing library has been initialized.\n");
}
extern "C" void
deinit (void)
{
/* deinitialization */
}
|
271305ae930082ed85f2d4cdbccf4a77b335c94b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#include <sys/time.h>
int rowSize;
__global__ void printGpu(float *d_a, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d_a[i * size + j]);
printf("\n");
}
}
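// one Floyd-Warshall relaxation step for pivot k: the thread for (row blockIdx.y, column col) relaxes d[row][col] against the path through k, with d[row][k] loaded once per block into shared memory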
__global__ void Dloop_FW(float *d_a, int k, int rowSize)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= rowSize)
return;
__shared__ int intermed;
if (threadIdx.x == 0) {
intermed = d_a[rowSize * blockIdx.y + k];
}
__syncthreads();
d_a[blockIdx.y * rowSize + col] = fmin(d_a[blockIdx.y * rowSize + col], intermed + d_a[k * rowSize + col]);
}
void print_matrix(float *d, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d[i * size + j]);
puts("");
}
}
int main(int argc, char** argv)
{
float *d_a;
float *a;
size_t pitch;
rowSize = atoi(argv[1]);
int colSize = rowSize;
int i, j, k;
hipError_t err = hipSuccess;
size_t totalSize = rowSize * colSize * sizeof(float);
a = (float *) malloc(totalSize);
if (!a)
{
printf("Unable to allocate memory for host array\n");
return 1;
}
err = hipMallocPitch(&d_a, &pitch, rowSize * sizeof(float), colSize);
if (!d_a)
{
printf("memory failed for hipMalloc");
return 1;
}
if (err != 0) {
printf("%s-%d", hipGetErrorString(err), 3);
return 1;
}
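// fill the adjacency matrix with a synthetic weight pattern: zero on the diagonal, (i + j) or (i + j) % 7 elsewhere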
for (i = 0; i < rowSize; i++)
for (j = 0; j < colSize; j++)
{
if (i == j) {
a[i * rowSize + j] = 0;
}
else {
a[i * rowSize + j] = (i + j) % 5 ? (i + j) : (i + j) % 7;
}
}
//puts("input matrix :");
//print_matrix(a,rowSize);
err = hipMemcpy(d_a, a, totalSize, hipMemcpyHostToDevice);
if (err != 0) {
printf("after h2d %s-%d", hipGetErrorString(err), 3);
return 1;
}
int threadsPerBlock;
if (rowSize < 1024) {
threadsPerBlock = rowSize;
} else {
threadsPerBlock = 1024;
}
dim3 blocksPerGrid( (colSize + threadsPerBlock - 1) / threadsPerBlock , rowSize);
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
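// one kernel launch per pivot k; each launch relaxes every (row, col) entry against paths through k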
for (k = 0; k < rowSize; k++) {
hipLaunchKernelGGL(( Dloop_FW) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a, k, rowSize);
hipDeviceSynchronize();
}
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
printf("error = %s\n", hipGetErrorString(hipGetLastError()));
err = hipMemcpy(a, d_a, totalSize, hipMemcpyDeviceToHost);
if (err != 0) {
printf("final %s-%d", hipGetErrorString(err), 3);
return 1;
}
//puts("output matrix :");
//print_matrix(a,rowSize);
free(a);
hipFree(d_a);
return 0;
}
| 271305ae930082ed85f2d4cdbccf4a77b335c94b.cu | #include<stdio.h>
#include<math.h>
#include <sys/time.h>
int rowSize;
__global__ void printGpu(float *d_a, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d_a[i * size + j]);
printf("\n");
}
}
__global__ void Dloop_FW(float *d_a, int k, int rowSize)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= rowSize)
return;
__shared__ int intermed;
if (threadIdx.x == 0) {
intermed = d_a[rowSize * blockIdx.y + k];
}
__syncthreads();
d_a[blockIdx.y * rowSize + col] = fmin(d_a[blockIdx.y * rowSize + col], intermed + d_a[k * rowSize + col]);
}
void print_matrix(float *d, int size)
{
int i, j;
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
printf("%0.1f\t", d[i * size + j]);
puts("");
}
}
int main(int argc, char** argv)
{
float *d_a;
float *a;
size_t pitch;
rowSize = atoi(argv[1]);
int colSize = rowSize;
int i, j, k;
cudaError_t err = cudaSuccess;
size_t totalSize = rowSize * colSize * sizeof(float);
a = (float *) malloc(totalSize);
if (!a)
{
printf("Unable to allocate memory for host array\n");
return 1;
}
err = cudaMallocPitch(&d_a, &pitch, rowSize * sizeof(float), colSize);
if (!d_a)
{
printf("memory failed for cudaMalloc");
return 1;
}
if (err != 0) {
printf("%s-%d", cudaGetErrorString(err), 3);
return 1;
}
for (i = 0; i < rowSize; i++)
for (j = 0; j < colSize; j++)
{
if (i == j) {
a[i * rowSize + j] = 0;
}
else {
a[i * rowSize + j] = (i + j) % 5 ? (i + j) : (i + j) % 7;
}
}
//puts("input matrix :");
//print_matrix(a,rowSize);
err = cudaMemcpy(d_a, a, totalSize, cudaMemcpyHostToDevice);
if (err != 0) {
printf("after h2d %s-%d", cudaGetErrorString(err), 3);
return 1;
}
int threadsPerBlock;
if (rowSize < 1024) {
threadsPerBlock = rowSize;
} else {
threadsPerBlock = 1024;
}
dim3 blocksPerGrid( (colSize + threadsPerBlock - 1) / threadsPerBlock , rowSize);
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for (k = 0; k < rowSize; k++) {
Dloop_FW <<< blocksPerGrid, threadsPerBlock>>>(d_a, k, rowSize);
cudaThreadSynchronize();
}
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
printf("error = %s\n", cudaGetErrorString(cudaGetLastError()));
err = cudaMemcpy(a, d_a, totalSize, cudaMemcpyDeviceToHost);
if (err != 0) {
printf("final %s-%d", cudaGetErrorString(err), 3);
return 1;
}
//puts("output matrix :");
//print_matrix(a,rowSize);
free(a);
cudaFree(d_a);
return 0;
}
|
26fcea53939952e6b6a96e881835b38fbf3eeff9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/top_k_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/top_k_function_cuda.h"
namespace phi {
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_MAXLENGTH_BASE(MaxLength, ...) \
case (MaxLength): { \
constexpr auto maxLength = (MaxLength); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(1024, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(512, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
#define FIXED_MAXLENGTH(...) \
FIXED_MAXLENGTH_BASE(1, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(2, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(3, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(4, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(5, ##__VA_ARGS__)
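// the FIXED_* switch helpers let the launch sites instantiate the kernels with a compile-time block size and maxLength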
template <typename T, typename Context>
void TopkKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* out,
DenseTensor* indices) {
const auto* input = &x;
// get the input dims
const auto& in_dims = input->dims();
// 0d input tensor
if (in_dims.size() == 0) {
phi::Copy<Context>(dev_ctx, x, dev_ctx.GetPlace(), false, out);
dev_ctx.template Alloc<int64_t>(indices);
phi::funcs::set_constant(dev_ctx, indices, 0.0);
return;
}
// calculate the real axis
if (axis < 0) axis += in_dims.size();
int k = k_scalar.to<int>();
PADDLE_ENFORCE_GE(
x.numel(),
k,
errors::InvalidArgument(
"x has only %d element, can not find %d top values.", x.numel(), k));
if (k_scalar.FromTensor()) {
phi::DDim out_dims = out->dims();
out_dims[axis] = k;
out->Resize(out_dims);
indices->Resize(out_dims);
}
const auto& out_dims = out->dims();
const T* input_data = input->data<T>();
T* output_data = dev_ctx.template Alloc<T>(out);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
if (axis == in_dims.size() - 1) {
// if taking the topK from the last axis
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
if (k > input_width) {
k = input_width;
}
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.25) {
auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx);
if (phi::funcs::SortTopk<T>(*ctx,
input,
input_width,
input_height,
k,
out,
indices,
largest)) {
// Succeeded, return.
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 9000
if (input_width >= 1024 && in_dims.size() == 1) {
// 1. Gather TopK, but without sorting
constexpr int max_num_threads = 1024;
if (largest) {
hipLaunchKernelGGL(( phi::funcs::RadixTopK<T, true>)
, dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(),
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
} else {
hipLaunchKernelGGL(( phi::funcs::RadixTopK<T, false>)
, dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(),
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
}
// 2. Sort if needed
if (sorted) {
DenseTensor sorted_output;
DenseTensor sorted_indices;
DenseTensor gather_indices;
sorted_output.Resize(out->dims());
sorted_indices.Resize(indices->dims());
gather_indices.Resize(indices->dims());
dev_ctx.template Alloc<T>(&sorted_output);
dev_ctx.template Alloc<int64_t>(&sorted_indices);
dev_ctx.template Alloc<int64_t>(&gather_indices);
auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx);
if (phi::funcs::SortTopk<T>(*ctx,
out,
k,
input_height,
k,
&sorted_output,
&sorted_indices,
largest)) {
funcs::GPUGather<int64_t, int64_t>(
dev_ctx, *indices, sorted_indices, &gather_indices);
Copy(dev_ctx, gather_indices, indices->place(), false, indices);
Copy(dev_ctx, sorted_output, out->place(), false, out);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
} else {
return;
}
}
#endif
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, input_width);
switch (config.thread_per_block.x) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( phi::funcs::KeMatrixTopK<T, 20, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(switch (phi::funcs::getMaxLength(k)) {
FIXED_MAXLENGTH(
hipLaunchKernelGGL(( phi::funcs::KeMatrixTopK<T, maxLength, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
default:
PADDLE_THROW(
errors::Fatal("the input k has error when use getMaxLength "
"function to get the maxLength."));
});
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
// if the topK is not taken from the last axis, transpose the tensor and get
// TopK
// first step, prepare the trans args for the transpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
phi::DDim trans_dims(in_dims);
phi::DDim trans_out_dims(out->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
// second step, transpose the input
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, *input, &trans_input, trans);
// third step, calculate the topk
// allocate the tmp cuda memory for the tmp result
DenseTensor trans_ind;
DenseTensor trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx);
if (phi::funcs::SortTopk<T>(*ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind,
largest)) {
// last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, input_width);
switch (config.thread_per_block.x) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( phi::funcs::KeMatrixTopK<T, 20, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(switch (phi::funcs::getMaxLength(k)) {
hipLaunchKernelGGL(( FIXED_MAXLENGTH(phi::funcs::KeMatrixTopK<T, maxLength, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
default:
PADDLE_THROW(
errors::Fatal("the input k has error when use getMaxLength "
"function to get the maxLength."));
});
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
// last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
}
}
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace phi
PD_REGISTER_KERNEL(topk,
GPU,
ALL_LAYOUT,
phi::TopkKernel,
float,
double,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {
kernel->OutputAt(1).SetDataType(phi::DataType::INT64);
}
| 26fcea53939952e6b6a96e881835b38fbf3eeff9.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/top_k_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/top_k_function_cuda.h"
namespace phi {
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_MAXLENGTH_BASE(MaxLength, ...) \
case (MaxLength): { \
constexpr auto maxLength = (MaxLength); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(1024, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(512, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
#define FIXED_MAXLENGTH(...) \
FIXED_MAXLENGTH_BASE(1, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(2, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(3, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(4, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(5, ##__VA_ARGS__)
template <typename T, typename Context>
void TopkKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* out,
DenseTensor* indices) {
const auto* input = &x;
// get the input dims
const auto& in_dims = input->dims();
// 0d input tensor
if (in_dims.size() == 0) {
phi::Copy<Context>(dev_ctx, x, dev_ctx.GetPlace(), false, out);
dev_ctx.template Alloc<int64_t>(indices);
phi::funcs::set_constant(dev_ctx, indices, 0.0);
return;
}
// calculate the real axis
if (axis < 0) axis += in_dims.size();
int k = k_scalar.to<int>();
PADDLE_ENFORCE_GE(
x.numel(),
k,
errors::InvalidArgument(
"x has only %d element, can not find %d top values.", x.numel(), k));
if (k_scalar.FromTensor()) {
phi::DDim out_dims = out->dims();
out_dims[axis] = k;
out->Resize(out_dims);
indices->Resize(out_dims);
}
const auto& out_dims = out->dims();
const T* input_data = input->data<T>();
T* output_data = dev_ctx.template Alloc<T>(out);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
if (axis == in_dims.size() - 1) {
// if taking the topK from the last axis
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
if (k > input_width) {
k = input_width;
}
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.25) {
auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx);
if (phi::funcs::SortTopk<T>(*ctx,
input,
input_width,
input_height,
k,
out,
indices,
largest)) {
// Succeeded, return.
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 9000
if (input_width >= 1024 && in_dims.size() == 1) {
// 1. Gather TopK, but without sorting
constexpr int max_num_threads = 1024;
if (largest) {
phi::funcs::RadixTopK<T, true>
<<<input_height, max_num_threads, 0, dev_ctx.stream()>>>(
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
} else {
phi::funcs::RadixTopK<T, false>
<<<input_height, max_num_threads, 0, dev_ctx.stream()>>>(
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
}
// 2. Sort if needed
if (sorted) {
DenseTensor sorted_output;
DenseTensor sorted_indices;
DenseTensor gather_indices;
sorted_output.Resize(out->dims());
sorted_indices.Resize(indices->dims());
gather_indices.Resize(indices->dims());
dev_ctx.template Alloc<T>(&sorted_output);
dev_ctx.template Alloc<int64_t>(&sorted_indices);
dev_ctx.template Alloc<int64_t>(&gather_indices);
auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx);
if (phi::funcs::SortTopk<T>(*ctx,
out,
k,
input_height,
k,
&sorted_output,
&sorted_indices,
largest)) {
funcs::GPUGather<int64_t, int64_t>(
dev_ctx, *indices, sorted_indices, &gather_indices);
Copy(dev_ctx, gather_indices, indices->place(), false, indices);
Copy(dev_ctx, sorted_output, out->place(), false, out);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
} else {
return;
}
}
#endif
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, input_width);
switch (config.thread_per_block.x) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
phi::funcs::KeMatrixTopK<T, 20, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(switch (phi::funcs::getMaxLength(k)) {
FIXED_MAXLENGTH(
phi::funcs::KeMatrixTopK<T, maxLength, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
default:
PADDLE_THROW(
errors::Fatal("the input k has error when use getMaxLength "
"function to get the maxLength."));
});
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
// if the topK is not taken from the last axis, transpose the tensor and get
// TopK
// first step, prepare the trans args for the transpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
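    // For example (illustrative): with a rank-3 input and axis = 1, the permutation built above
    // is trans = {0, 2, 1}; the topk axis is swapped into the last position so the last-axis
    // path can be reused, and the same permutation transposes the result back afterwards.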
phi::DDim trans_dims(in_dims);
phi::DDim trans_out_dims(out->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
    // second step, transpose the input
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, *input, &trans_input, trans);
    // third step, calculate the topk
// allocate the tmp cuda memory for the tmp result
DenseTensor trans_ind;
DenseTensor trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
    // This threshold was chosen empirically from multiple sets of benchmark
    // statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx);
if (phi::funcs::SortTopk<T>(*ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind,
largest)) {
        // last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, input_width);
switch (config.thread_per_block.x) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
phi::funcs::KeMatrixTopK<T, 20, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(switch (phi::funcs::getMaxLength(k)) {
FIXED_MAXLENGTH(phi::funcs::KeMatrixTopK<T, maxLength, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
default:
PADDLE_THROW(
errors::Fatal("the input k has error when use getMaxLength "
"function to get the maxLength."));
});
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
    // last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
}
}
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace phi
PD_REGISTER_KERNEL(topk,
GPU,
ALL_LAYOUT,
phi::TopkKernel,
float,
double,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {
kernel->OutputAt(1).SetDataType(phi::DataType::INT64);
}
|
dffe04c13865bf6ae9bdb8b98ffb5d5067c73436.hip | // !!! This is a file automatically generated by hipify!!!
#include "MemoryManager.cuh"
#include <stdio.h>
EXTERN_C
{
EXPORT int _HostToHostCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return hipMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), hipMemcpyHostToHost);
}
EXPORT int _HostToHostCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _HostToHostCopy(dest, source);
}
EXPORT int _HostToDeviceCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return hipMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), hipMemcpyHostToDevice);
}
EXPORT int _HostToDeviceCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _HostToDeviceCopy(dest, source);
}
EXPORT int _DeviceToHostCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return hipMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), hipMemcpyDeviceToHost);
}
EXPORT int _DeviceToHostCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _DeviceToHostCopy(dest, source);
}
EXPORT int _DeviceToDeviceCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return hipMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), hipMemcpyDeviceToDevice);
}
EXPORT int _DeviceToDeviceCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _DeviceToDeviceCopy(dest, source);
}
EXPORT int _AutoCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return hipMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), hipMemcpyDefault);
}
EXPORT int _AutoCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _AutoCopy(dest, source);
}
EXPORT int _Alloc(MemoryBuffer& buf)
{
return hipMalloc((void **)&buf.pointer, buf.TotalSize());
}
EXPORT int _AllocRaw(ptr_t& pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer tmp(0, size, memorySpace, mathDomain);
int ret = _Alloc(tmp);
pointer = tmp.pointer;
return ret;
}
EXPORT int _AllocHost(MemoryBuffer& buf)
{
return hipHostMalloc((void **)&buf.pointer, buf.TotalSize());
}
EXPORT int _AllocHostRaw(ptr_t& pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer tmp(0, size, memorySpace, mathDomain);
int ret = _AllocHost(tmp);
pointer = tmp.pointer;
return ret;
}
EXPORT int _Free(const MemoryBuffer& buf)
{
hipDeviceSynchronize();
return hipFree((void *)buf.pointer);
}
EXPORT int _FreeRaw(const ptr_t pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer buf(pointer, size, memorySpace, mathDomain);
return _Free(buf);
}
EXPORT int _FreeHost(const MemoryBuffer& buf)
{
return hipHostFree((void *)buf.pointer);
}
EXPORT int _FreeHostRaw(const ptr_t pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer buf(pointer, size, memorySpace, mathDomain);
return _FreeHost(buf);
}
}
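/*
   Usage sketch (illustrative only; ms, md and hostPtr are placeholders, not names taken from
   MemoryManager.cuh, and the size is assumed to be an element count):
       ptr_t devPtr = 0;
       if (_AllocRaw(devPtr, 1024, ms, md) == 0)            // 0 == hipSuccess
       {
           _HostToDeviceCopyRaw(devPtr, hostPtr, 1024, ms, md);
           _FreeRaw(devPtr, 1024, ms, md);
       }
*/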
| dffe04c13865bf6ae9bdb8b98ffb5d5067c73436.cu | #include "MemoryManager.cuh"
#include <stdio.h>
EXTERN_C
{
EXPORT int _HostToHostCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return cudaMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), cudaMemcpyHostToHost);
}
EXPORT int _HostToHostCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _HostToHostCopy(dest, source);
}
EXPORT int _HostToDeviceCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return cudaMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), cudaMemcpyHostToDevice);
}
EXPORT int _HostToDeviceCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _HostToDeviceCopy(dest, source);
}
EXPORT int _DeviceToHostCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return cudaMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), cudaMemcpyDeviceToHost);
}
EXPORT int _DeviceToHostCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _DeviceToHostCopy(dest, source);
}
EXPORT int _DeviceToDeviceCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return cudaMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), cudaMemcpyDeviceToDevice);
}
EXPORT int _DeviceToDeviceCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _DeviceToDeviceCopy(dest, source);
}
EXPORT int _AutoCopy(MemoryBuffer& dest, const MemoryBuffer& source)
{
return cudaMemcpy((void *)dest.pointer, (void *)source.pointer, dest.TotalSize(), cudaMemcpyDefault);
}
EXPORT int _AutoCopyRaw(const ptr_t destPointer, const ptr_t sourcePointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer dest(destPointer, size, memorySpace, mathDomain);
MemoryBuffer source(sourcePointer, size, memorySpace, mathDomain);
return _AutoCopy(dest, source);
}
EXPORT int _Alloc(MemoryBuffer& buf)
{
return cudaMalloc((void **)&buf.pointer, buf.TotalSize());
}
EXPORT int _AllocRaw(ptr_t& pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer tmp(0, size, memorySpace, mathDomain);
int ret = _Alloc(tmp);
pointer = tmp.pointer;
return ret;
}
EXPORT int _AllocHost(MemoryBuffer& buf)
{
return cudaMallocHost((void **)&buf.pointer, buf.TotalSize());
}
EXPORT int _AllocHostRaw(ptr_t& pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer tmp(0, size, memorySpace, mathDomain);
int ret = _AllocHost(tmp);
pointer = tmp.pointer;
return ret;
}
EXPORT int _Free(const MemoryBuffer& buf)
{
cudaDeviceSynchronize();
return cudaFree((void *)buf.pointer);
}
EXPORT int _FreeRaw(const ptr_t pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer buf(pointer, size, memorySpace, mathDomain);
return _Free(buf);
}
EXPORT int _FreeHost(const MemoryBuffer& buf)
{
return cudaFreeHost((void *)buf.pointer);
}
EXPORT int _FreeHostRaw(const ptr_t pointer, const unsigned size, const MemorySpace memorySpace, const MathDomain mathDomain)
{
MemoryBuffer buf(pointer, size, memorySpace, mathDomain);
return _FreeHost(buf);
}
}
|
1ca3770c7e45e9b4e5fcb96821f1e1150005a295.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
float* h_Value;
float* d_Value;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(float* Value)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
__device__ __shared__ float I3[THREADS_PER_BLOCK];
__device__ __shared__ float I4[THREADS_PER_BLOCK];
I1[i]=i;
I2[i]=i/2;
I3[i]=i;
I4[i]=i+1;
//Do Some Computation
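 // (The loop below only generates sustained constant-memory reads and shared-memory traffic so
 // that power can be sampled while the SM stays busy; the numerical result is not meaningful.)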
float Value1;
float Value2;
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=ConstArray1[(i+k)%THREADS_PER_BLOCK];
Value2=ConstArray2[(i+k+1)%THREADS_PER_BLOCK];
I1[i]=Value1*2+I2[i];
I2[i]=Value2+I4[i];
I3[i]=Value1/2+I3[i];
I4[i]=Value2+I1[i];
I1[i]=I2[(i+k)%THREADS_PER_BLOCK];
I2[i]=I1[(i+k+1)%THREADS_PER_BLOCK];
I3[i]=I4[(i+k)%THREADS_PER_BLOCK];
I4[i]=I3[(i+k+1)%THREADS_PER_BLOCK];
}
__syncthreads();
*Value=I1[i]+I2[i]+I3[i]+I4[i];
}
// Host code
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
h_Value = (float *) malloc(sizeof(float));
 srand(time(0)); // seed once; re-seeding inside the loop with the same second repeats one value
 for(int i=0; i<THREADS_PER_BLOCK;i++){
 array1[i] = rand() / (float)RAND_MAX; // float division; integer division truncates to 0
 }
 float array2[THREADS_PER_BLOCK];
 for(int i=0; i<THREADS_PER_BLOCK;i++){
 array2[i] = rand() / (float)RAND_MAX;
 }
hipMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
checkCudaErrors( hipMalloc((void**)&d_Value, sizeof(float)) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Value);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
checkCudaErrors( hipMemcpy(h_Value, d_Value, sizeof(float), hipMemcpyDeviceToHost) );
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
return 0;
}
| 1ca3770c7e45e9b4e5fcb96821f1e1150005a295.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
float* h_Value;
float* d_Value;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(float* Value)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
__device__ __shared__ float I3[THREADS_PER_BLOCK];
__device__ __shared__ float I4[THREADS_PER_BLOCK];
I1[i]=i;
I2[i]=i/2;
I3[i]=i;
I4[i]=i+1;
//Do Some Computation
float Value1;
float Value2;
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=ConstArray1[(i+k)%THREADS_PER_BLOCK];
Value2=ConstArray2[(i+k+1)%THREADS_PER_BLOCK];
I1[i]=Value1*2+I2[i];
I2[i]=Value2+I4[i];
I3[i]=Value1/2+I3[i];
I4[i]=Value2+I1[i];
I1[i]=I2[(i+k)%THREADS_PER_BLOCK];
I2[i]=I1[(i+k+1)%THREADS_PER_BLOCK];
I3[i]=I4[(i+k)%THREADS_PER_BLOCK];
I4[i]=I3[(i+k+1)%THREADS_PER_BLOCK];
}
__syncthreads();
*Value=I1[i]+I2[i]+I3[i]+I4[i];
}
// Host code
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
h_Value = (float *) malloc(sizeof(float));
 srand(time(0)); // seed once; re-seeding inside the loop with the same second repeats one value
 for(int i=0; i<THREADS_PER_BLOCK;i++){
 array1[i] = rand() / (float)RAND_MAX; // float division; integer division truncates to 0
 }
 float array2[THREADS_PER_BLOCK];
 for(int i=0; i<THREADS_PER_BLOCK;i++){
 array2[i] = rand() / (float)RAND_MAX;
 }
cudaMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
checkCudaErrors( cudaMalloc((void**)&d_Value, sizeof(float)) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_Value);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
checkCudaErrors( cudaMemcpy(h_Value, d_Value, sizeof(float), cudaMemcpyDeviceToHost) );
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
return 0;
}
|
32b5f7a27a964920988a02a0a74c1c2859b8c359.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
//
// This is a block-based algorithm.
// Blocks are 2x2 sized, with internal pixels named as:
// +---+
// |a b|
// |c d|
// +---+
//
// Neighbour blocks of block X are named as:
// +-+-+-+
// |P|Q|R|
// +-+-+-+
// |S|X|
// +-+-+
//
enum class Info : unsigned char { a = 0, b = 1, c = 2, d = 3, P = 4, Q = 5, R = 6, S = 7 };
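    // For example (illustrative), with the 2x2 layout above: the block whose top-left pixel is
    // (2r, 2c) covers a=(2r,2c), b=(2r,2c+1), c=(2r+1,2c), d=(2r+1,2c+1); its neighbour blocks
    // P, Q, R and S have their top-left pixels at (2r-2,2c-2), (2r-2,2c), (2r-2,2c+2) and (2r,2c-2).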
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, Info pos) {
return (bitmap >> static_cast<unsigned char>(pos)) & 1;
}
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
return (bitmap >> pos) & 1;
}
// Only use it with unsigned numeric types
__device__ __forceinline__ void SetBit(unsigned char& bitmap, Info pos) {
bitmap |= (1 << static_cast<unsigned char>(pos));
}
// Returns the root index of the UFTree
__device__ unsigned Find(const int* s_buf, unsigned n) {
while (s_buf[n] != n) {
n = s_buf[n];
}
return n;
}
__device__ unsigned FindAndCompress(int* s_buf, unsigned n) {
unsigned id = n;
while (s_buf[n] != n) {
n = s_buf[n];
s_buf[id] = n;
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int* s_buf, unsigned a, unsigned b) {
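        // Illustrative walk-through: if Find returns root 7 for a and root 3 for b,
        // atomicMin(s_buf + 7, 3) links 7 -> 3 so the smaller index always becomes the root;
        // if another thread changed s_buf[7] concurrently, the returned old value is no longer 7
        // and the loop simply retries.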
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a);
done = (old == b);
b = old;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b);
done = (old == a);
a = old;
}
else {
done = true;
}
} while (!done);
}
__global__ void InitLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char* last_pixel) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned P = 0;
// Bitmask representing two kinds of information
// Bits 0, 1, 2, 3 are set if pixel a, b, c, d are foreground, respectively
// Bits 4, 5, 6, 7 are set if block P, Q, R, S need to be merged to X in Merge phase
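            // e.g. (illustrative) info = 0b00100011 means pixels a and b are foreground and only
            // the Q block has to be merged with X during the Merge phase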
unsigned char info = 0;
char buffer alignas(int)[4];
*(reinterpret_cast<int*>(buffer)) = 0;
// Read pairs of consecutive values in memory at once
if (col + 1 < img.cols) {
// This does not depend on endianness
*(reinterpret_cast<int16_t*>(buffer)) = *(reinterpret_cast<int16_t*>(img.data + img_index));
if (row + 1 < img.rows) {
*(reinterpret_cast<int16_t*>(buffer + 2)) = *(reinterpret_cast<int16_t*>(img.data + img_index + img.step));
}
}
else {
buffer[0] = img.data[img_index];
if (row + 1 < img.rows) {
buffer[2] = img.data[img_index + img.step];
}
}
if (buffer[0]) {
P |= 0x777;
SetBit(info, Info::a);
}
if (buffer[1]) {
P |= (0x777 << 1);
SetBit(info, Info::b);
}
if (buffer[2]) {
P |= (0x777 << 4);
SetBit(info, Info::c);
}
if (buffer[3]) {
SetBit(info, Info::d);
}
if (col == 0) {
P &= 0xEEEE;
}
if (col + 1 >= img.cols) {
P &= 0x3333;
}
else if (col + 2 >= img.cols) {
P &= 0x7777;
}
if (row == 0) {
P &= 0xFFF0;
}
if (row + 1 >= img.rows) {
P &= 0x00FF;
}
else if (row + 2 >= img.rows) {
P &= 0x0FFF;
}
// P is now ready to be used to find neighbour blocks
// P value avoids range errors
int father_offset = 0;
// P square
if (HasBit(P, 0) && img.data[img_index - img.step - 1]) {
father_offset = -(2 * (labels.step / labels.elem_size) + 2);
}
// Q square
if ((HasBit(P, 1) && img.data[img_index - img.step]) || (HasBit(P, 2) && img.data[img_index + 1 - img.step])) {
if (!father_offset) {
father_offset = -(2 * (labels.step / labels.elem_size));
}
else {
SetBit(info, Info::Q);
}
}
// R square
if (HasBit(P, 3) && img.data[img_index + 2 - img.step]) {
if (!father_offset) {
father_offset = -(2 * (labels.step / labels.elem_size) - 2);
}
else {
SetBit(info, Info::R);
}
}
// S square
if ((HasBit(P, 4) && img.data[img_index - 1]) || (HasBit(P, 8) && img.data[img_index + img.step - 1])) {
if (!father_offset) {
father_offset = -2;
}
else {
SetBit(info, Info::S);
}
}
labels.data[labels_index] = labels_index + father_offset;
if (col + 1 < labels.cols) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + 1);
}
else if (row + 1 < labels.rows) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + labels.step / labels.elem_size);
}
*last_pixel = info;
}
}
__global__ void Merge(cuda::PtrStepSzi labels, unsigned char* last_pixel) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
if (col + 1 < labels.cols) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + 1);
}
else if (row + 1 < labels.rows) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + labels.step / labels.elem_size);
}
unsigned char info = *last_pixel;
if (HasBit(info, Info::Q)) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size));
}
if (HasBit(info, Info::R)) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2);
}
if (HasBit(info, Info::S)) {
Union(labels.data, labels_index, labels_index - 2);
}
}
}
__global__ void Compression(cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
FindAndCompress(labels.data, labels_index);
}
}
__global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
int label;
unsigned char info;
unsigned long long buffer;
if (col + 1 < labels.cols) {
buffer = *reinterpret_cast<unsigned long long*>(labels.data + labels_index);
label = (buffer & (0xFFFFFFFF)) + 1;
info = (buffer >> 32) & 0xFFFFFFFF;
}
else {
label = labels[labels_index] + 1;
if (row + 1 < labels.rows) {
info = labels[labels_index + labels.step / labels.elem_size];
}
else {
// Read from the input image
// "a" is already in position 0
info = img[row * img.step + col];
}
}
if (col + 1 < labels.cols) {
*reinterpret_cast<unsigned long long*>(labels.data + labels_index) =
(static_cast<unsigned long long>(HasBit(info, Info::b) * label) << 32) | (HasBit(info, Info::a) * label);
if (row + 1 < labels.rows) {
*reinterpret_cast<unsigned long long*>(labels.data + labels_index + labels.step / labels.elem_size) =
(static_cast<unsigned long long>(HasBit(info, Info::d) * label) << 32) | (HasBit(info, Info::c) * label);
}
}
else {
labels[labels_index] = HasBit(info, Info::a) * label;
if (row + 1 < labels.rows) {
labels[labels_index + (labels.step / labels.elem_size)] = HasBit(info, Info::c) * label;
}
}
}
}
}
class BKE : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
unsigned char* last_pixel_;
bool last_pixel_allocated_;
public:
BKE() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
last_pixel_allocated_ = false;
if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
hipMalloc(&last_pixel_, sizeof(unsigned char));
last_pixel_allocated_ = true;
}
else {
last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
}
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
//Mat1i init_blocks;
//d_img_labels_.download(init_blocks);
//cuda::GpuMat d_init_labels = d_img_labels_.clone();
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_init_labels);
//Mat1i init_labels;
//d_init_labels.download(init_labels);
//d_init_labels.release();
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//Mat1i compr_blocks;
//d_img_labels_.download(compr_blocks);
//cuda::GpuMat d_compr_labels = d_img_labels_.clone();
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_compr_labels);
//Mat1i compr_labels;
//d_compr_labels.download(compr_labels);
//d_compr_labels.release();
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
//Mat1i merge_blocks;
//d_img_labels_.download(merge_blocks);
//cuda::GpuMat d_merge_labels = d_img_labels_.clone();
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_merge_labels);
//Mat1i merge_labels;
//d_merge_labels.download(merge_labels);
//d_merge_labels.release();
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//Mat1i final_blocks;
//d_img_labels_.download(final_blocks);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//d_img_labels_.download(img_labels_);
if (last_pixel_allocated_) {
hipFree(last_pixel_);
}
hipDeviceSynchronize();
}
void PerformLabelingBlocksize(int x, int y, int z) override {
d_img_labels_.create(d_img_.size(), CV_32SC1);
last_pixel_allocated_ = false;
if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
hipMalloc(&last_pixel_, sizeof(unsigned char));
last_pixel_allocated_ = true;
}
else {
last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
}
grid_size_ = dim3((((d_img_.cols + 1) / 2) + x - 1) / x, (((d_img_.rows + 1) / 2) + y - 1) / y, 1);
block_size_ = dim3(x, y, 1);
BLOCKSIZE_KERNEL(InitLabeling, grid_size_, block_size_, 0, d_img_, d_img_labels_, last_pixel_)
BLOCKSIZE_KERNEL(Compression, grid_size_, block_size_, 0, d_img_labels_)
BLOCKSIZE_KERNEL(Merge, grid_size_, block_size_, 0, d_img_labels_, last_pixel_)
BLOCKSIZE_KERNEL(Compression, grid_size_, block_size_, 0, d_img_labels_)
BLOCKSIZE_KERNEL(FinalLabeling, grid_size_, block_size_, 0, d_img_, d_img_labels_)
if (last_pixel_allocated_) {
hipFree(last_pixel_);
}
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
hipMalloc(&last_pixel_, sizeof(unsigned char));
last_pixel_allocated_ = true;
}
else {
last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
}
hipMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
if (last_pixel_allocated_) {
hipMemset(last_pixel_, 0, 1);
}
hipDeviceSynchronize();
double t = perf_.stop();
perf_.start();
hipMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
if (last_pixel_allocated_) {
hipMemset(last_pixel_, 0, 1);
}
hipDeviceSynchronize();
t -= perf_.stop();
return t;
}
void Dealloc() {
if (last_pixel_allocated_) {
hipFree(last_pixel_);
}
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
perf_.start();
Dealloc();
perf_.stop();
double dealloc_timing = perf_.last();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(BKE)
REGISTER_KERNELS(BKE, InitLabeling, Compression, Merge, FinalLabeling) | 32b5f7a27a964920988a02a0a74c1c2859b8c359.cu | // Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
//
// This is a block-based algorithm.
// Blocks are 2x2 sized, with internal pixels named as:
// +---+
// |a b|
// |c d|
// +---+
//
// Neighbour blocks of block X are named as:
// +-+-+-+
// |P|Q|R|
// +-+-+-+
// |S|X|
// +-+-+
//
enum class Info : unsigned char { a = 0, b = 1, c = 2, d = 3, P = 4, Q = 5, R = 6, S = 7 };
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, Info pos) {
return (bitmap >> static_cast<unsigned char>(pos)) & 1;
}
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
return (bitmap >> pos) & 1;
}
// Only use it with unsigned numeric types
__device__ __forceinline__ void SetBit(unsigned char& bitmap, Info pos) {
bitmap |= (1 << static_cast<unsigned char>(pos));
}
// Returns the root index of the UFTree
__device__ unsigned Find(const int* s_buf, unsigned n) {
while (s_buf[n] != n) {
n = s_buf[n];
}
return n;
}
__device__ unsigned FindAndCompress(int* s_buf, unsigned n) {
unsigned id = n;
while (s_buf[n] != n) {
n = s_buf[n];
s_buf[id] = n;
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int* s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a);
done = (old == b);
b = old;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b);
done = (old == a);
a = old;
}
else {
done = true;
}
} while (!done);
}
__global__ void InitLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char* last_pixel) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned P = 0;
// Bitmask representing two kinds of information
// Bits 0, 1, 2, 3 are set if pixel a, b, c, d are foreground, respectively
// Bits 4, 5, 6, 7 are set if block P, Q, R, S need to be merged to X in Merge phase
unsigned char info = 0;
char buffer alignas(int)[4];
*(reinterpret_cast<int*>(buffer)) = 0;
// Read pairs of consecutive values in memory at once
if (col + 1 < img.cols) {
// This does not depend on endianness
*(reinterpret_cast<int16_t*>(buffer)) = *(reinterpret_cast<int16_t*>(img.data + img_index));
if (row + 1 < img.rows) {
*(reinterpret_cast<int16_t*>(buffer + 2)) = *(reinterpret_cast<int16_t*>(img.data + img_index + img.step));
}
}
else {
buffer[0] = img.data[img_index];
if (row + 1 < img.rows) {
buffer[2] = img.data[img_index + img.step];
}
}
if (buffer[0]) {
P |= 0x777;
SetBit(info, Info::a);
}
if (buffer[1]) {
P |= (0x777 << 1);
SetBit(info, Info::b);
}
if (buffer[2]) {
P |= (0x777 << 4);
SetBit(info, Info::c);
}
if (buffer[3]) {
SetBit(info, Info::d);
}
if (col == 0) {
P &= 0xEEEE;
}
if (col + 1 >= img.cols) {
P &= 0x3333;
}
else if (col + 2 >= img.cols) {
P &= 0x7777;
}
if (row == 0) {
P &= 0xFFF0;
}
if (row + 1 >= img.rows) {
P &= 0x00FF;
}
else if (row + 2 >= img.rows) {
P &= 0x0FFF;
}
// P is now ready to be used to find neighbour blocks
// P value avoids range errors
int father_offset = 0;
// P square
if (HasBit(P, 0) && img.data[img_index - img.step - 1]) {
father_offset = -(2 * (labels.step / labels.elem_size) + 2);
}
// Q square
if ((HasBit(P, 1) && img.data[img_index - img.step]) || (HasBit(P, 2) && img.data[img_index + 1 - img.step])) {
if (!father_offset) {
father_offset = -(2 * (labels.step / labels.elem_size));
}
else {
SetBit(info, Info::Q);
}
}
// R square
if (HasBit(P, 3) && img.data[img_index + 2 - img.step]) {
if (!father_offset) {
father_offset = -(2 * (labels.step / labels.elem_size) - 2);
}
else {
SetBit(info, Info::R);
}
}
// S square
if ((HasBit(P, 4) && img.data[img_index - 1]) || (HasBit(P, 8) && img.data[img_index + img.step - 1])) {
if (!father_offset) {
father_offset = -2;
}
else {
SetBit(info, Info::S);
}
}
labels.data[labels_index] = labels_index + father_offset;
if (col + 1 < labels.cols) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + 1);
}
else if (row + 1 < labels.rows) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + labels.step / labels.elem_size);
}
*last_pixel = info;
}
}
__global__ void Merge(cuda::PtrStepSzi labels, unsigned char* last_pixel) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
if (col + 1 < labels.cols) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + 1);
}
else if (row + 1 < labels.rows) {
last_pixel = reinterpret_cast<unsigned char*>(labels.data + labels_index + labels.step / labels.elem_size);
}
unsigned char info = *last_pixel;
if (HasBit(info, Info::Q)) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size));
}
if (HasBit(info, Info::R)) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2);
}
if (HasBit(info, Info::S)) {
Union(labels.data, labels_index, labels_index - 2);
}
}
}
__global__ void Compression(cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
FindAndCompress(labels.data, labels_index);
}
}
__global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
unsigned col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
int label;
unsigned char info;
unsigned long long buffer;
if (col + 1 < labels.cols) {
buffer = *reinterpret_cast<unsigned long long*>(labels.data + labels_index);
label = (buffer & (0xFFFFFFFF)) + 1;
info = (buffer >> 32) & 0xFFFFFFFF;
}
else {
label = labels[labels_index] + 1;
if (row + 1 < labels.rows) {
info = labels[labels_index + labels.step / labels.elem_size];
}
else {
// Read from the input image
// "a" is already in position 0
info = img[row * img.step + col];
}
}
if (col + 1 < labels.cols) {
*reinterpret_cast<unsigned long long*>(labels.data + labels_index) =
(static_cast<unsigned long long>(HasBit(info, Info::b) * label) << 32) | (HasBit(info, Info::a) * label);
if (row + 1 < labels.rows) {
*reinterpret_cast<unsigned long long*>(labels.data + labels_index + labels.step / labels.elem_size) =
(static_cast<unsigned long long>(HasBit(info, Info::d) * label) << 32) | (HasBit(info, Info::c) * label);
}
}
else {
labels[labels_index] = HasBit(info, Info::a) * label;
if (row + 1 < labels.rows) {
labels[labels_index + (labels.step / labels.elem_size)] = HasBit(info, Info::c) * label;
}
}
}
}
}
class BKE : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
unsigned char* last_pixel_;
bool last_pixel_allocated_;
public:
BKE() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
last_pixel_allocated_ = false;
if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
cudaMalloc(&last_pixel_, sizeof(unsigned char));
last_pixel_allocated_ = true;
}
else {
last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
}
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
//Mat1i init_blocks;
//d_img_labels_.download(init_blocks);
//cuda::GpuMat d_init_labels = d_img_labels_.clone();
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_init_labels);
//Mat1i init_labels;
//d_init_labels.download(init_labels);
//d_init_labels.release();
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//Mat1i compr_blocks;
//d_img_labels_.download(compr_blocks);
//cuda::GpuMat d_compr_labels = d_img_labels_.clone();
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_compr_labels);
//Mat1i compr_labels;
//d_compr_labels.download(compr_labels);
//d_compr_labels.release();
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
//Mat1i merge_blocks;
//d_img_labels_.download(merge_blocks);
//cuda::GpuMat d_merge_labels = d_img_labels_.clone();
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_merge_labels);
//Mat1i merge_labels;
//d_merge_labels.download(merge_labels);
//d_merge_labels.release();
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//Mat1i final_blocks;
//d_img_labels_.download(final_blocks);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//d_img_labels_.download(img_labels_);
if (last_pixel_allocated_) {
cudaFree(last_pixel_);
}
cudaDeviceSynchronize();
}
void PerformLabelingBlocksize(int x, int y, int z) override {
d_img_labels_.create(d_img_.size(), CV_32SC1);
last_pixel_allocated_ = false;
if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
cudaMalloc(&last_pixel_, sizeof(unsigned char));
last_pixel_allocated_ = true;
}
else {
last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
}
grid_size_ = dim3((((d_img_.cols + 1) / 2) + x - 1) / x, (((d_img_.rows + 1) / 2) + y - 1) / y, 1);
block_size_ = dim3(x, y, 1);
BLOCKSIZE_KERNEL(InitLabeling, grid_size_, block_size_, 0, d_img_, d_img_labels_, last_pixel_)
BLOCKSIZE_KERNEL(Compression, grid_size_, block_size_, 0, d_img_labels_)
BLOCKSIZE_KERNEL(Merge, grid_size_, block_size_, 0, d_img_labels_, last_pixel_)
BLOCKSIZE_KERNEL(Compression, grid_size_, block_size_, 0, d_img_labels_)
BLOCKSIZE_KERNEL(FinalLabeling, grid_size_, block_size_, 0, d_img_, d_img_labels_)
if (last_pixel_allocated_) {
cudaFree(last_pixel_);
}
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
cudaMalloc(&last_pixel_, sizeof(unsigned char));
last_pixel_allocated_ = true;
}
else {
last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
}
cudaMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
if (last_pixel_allocated_) {
cudaMemset(last_pixel_, 0, 1);
}
cudaDeviceSynchronize();
double t = perf_.stop();
perf_.start();
cudaMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
if (last_pixel_allocated_) {
cudaMemset(last_pixel_, 0, 1);
}
cudaDeviceSynchronize();
t -= perf_.stop();
return t;
}
void Dealloc() {
if (last_pixel_allocated_) {
cudaFree(last_pixel_);
}
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
perf_.start();
Dealloc();
perf_.stop();
double dealloc_timing = perf_.last();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(BKE)
REGISTER_KERNELS(BKE, InitLabeling, Compression, Merge, FinalLabeling) |
8ce1677cec9087fc7d6d079fc0bb34eaa4b588ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gradientRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD )
{
__shared__ float s_Data[ROWS_GRAD_BLOCKDIM_Z][ROWS_GRAD_BLOCKDIM_Y][(ROWS_GRAD_RESULT_STEPS + 2 * ROWS_GRAD_HALO_STEPS) * ROWS_GRAD_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_GRAD_RESULT_STEPS - ROWS_GRAD_HALO_STEPS) * ROWS_GRAD_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_GRAD_BLOCKDIM_Y + threadIdx.y;
const int baseZ = blockIdx.z * ROWS_GRAD_BLOCKDIM_Z + threadIdx.z;
d_Src += (baseZ * imageH + baseY) * imageW + baseX;
d_Dst += (baseZ * imageH + baseY) * imageW + baseX;
//Load main data
#pragma unroll
for (int i = ROWS_GRAD_HALO_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = d_Src[i * ROWS_GRAD_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = (baseX + i * ROWS_GRAD_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_GRAD_BLOCKDIM_X] : 0;
}
//Load right halo
#pragma unroll
for (int i = ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS + ROWS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = (baseX + i * ROWS_GRAD_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_GRAD_BLOCKDIM_X] : 0;
}
//Compute and store results
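    //(each output value below is the centred difference (I[x+1] - I[x-1]) * 0.5 along the row)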
__syncthreads();
#pragma unroll
for (int i = ROWS_GRAD_HALO_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i++)
{
float sum = 0;
sum += s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X + 1];
sum -= s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X - 1];
sum *= 0.5f;
d_Dst[i * ROWS_GRAD_BLOCKDIM_X] = sum;
}
} | 8ce1677cec9087fc7d6d079fc0bb34eaa4b588ca.cu | #include "includes.h"
__global__ void gradientRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD )
{
__shared__ float s_Data[ROWS_GRAD_BLOCKDIM_Z][ROWS_GRAD_BLOCKDIM_Y][(ROWS_GRAD_RESULT_STEPS + 2 * ROWS_GRAD_HALO_STEPS) * ROWS_GRAD_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_GRAD_RESULT_STEPS - ROWS_GRAD_HALO_STEPS) * ROWS_GRAD_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_GRAD_BLOCKDIM_Y + threadIdx.y;
const int baseZ = blockIdx.z * ROWS_GRAD_BLOCKDIM_Z + threadIdx.z;
d_Src += (baseZ * imageH + baseY) * imageW + baseX;
d_Dst += (baseZ * imageH + baseY) * imageW + baseX;
//Load main data
#pragma unroll
for (int i = ROWS_GRAD_HALO_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = d_Src[i * ROWS_GRAD_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = (baseX + i * ROWS_GRAD_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_GRAD_BLOCKDIM_X] : 0;
}
//Load right halo
#pragma unroll
for (int i = ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS + ROWS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = (baseX + i * ROWS_GRAD_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_GRAD_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ROWS_GRAD_HALO_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i++)
{
float sum = 0;
sum += s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X + 1];
sum -= s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X - 1];
sum *= 0.5f;
d_Dst[i * ROWS_GRAD_BLOCKDIM_X] = sum;
}
} |
4cb48e230cb28cf5ac5aad867ed265fa6f613aa8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
__global__ void zhuanshi(int *d_b,int *d_bt,int n)
{
int ITdx = threadIdx.x;
int IBdx = blockIdx.x;
d_bt[ITdx * n + IBdx] = d_b[IBdx * n + ITdx];
}
__global__ void neiji(int *d_a,int *d_bt,int *d_c,int *d_data,int ICTdx,int ICBdx,int n)
{
/*
int INTdx = threadIdx.x;
int i = 2, j = 1;
d_data[INTdx] = d_a[ICTdx * n + INTdx] * d_bt[ICBdx * n + INTdx];
__syncthreads();
while(i <= n)
{
if(INTdx % 2 == 0)
{
d_data[INTdx] += d_data[INTdx + j];
}
i *= 2;
j *= 2;
}
d_c[ICTdx * n + ICBdx] = d_data[0];
*/
}
__global__ void chengfa(int *d_a,int *d_bt,int *d_c,int *d_data,int n)
{
/*
int ICTdx = threadIdx.x;
int ICBdx = blockIdx.x;
neiji<<<1,n>>>(d_a,d_bt,d_c,d_data,ICTdx,ICBdx,n);
__syncthreads();
*/
}
int main()
{
 int blag = 1;// flag bit
int n = 0;
 /****** validate the input data ************/
 do{
 std::cout << "Please enter the matrix dimension:" << std::endl;
 std::cin >> n;
 if(n <= 0)
 {
 std::cout << "The matrix dimension you entered is invalid, please re-enter it!" << std::endl;
}else{
blag = 0;
}
}while(blag);
 /******* allocate host memory *********/
int *h_a = (int*)malloc(sizeof(int) * n * n);
int *h_b = (int*)malloc(sizeof(int) * n * n);
int *h_c = (int*)malloc(sizeof(int) * n * n);
int *h_bt = (int*)malloc(sizeof(int) * n * n);
 /******* initialize the host memory data ********/
 srand(time(NULL));// set the random seed
for(int i = 0; i < n * n; ++i)
{
h_a[i] = rand() % 11;
h_b[i] = rand() % 11;
h_c[i] = 0;
h_bt[i] = 0;
}
 /******* allocate device memory *******/
int *d_a,*d_b,*d_c,*d_bt,*d_data;
hipMalloc((void**)&d_a,sizeof(int) * n * n);
hipMalloc((void**)&d_b,sizeof(int) * n * n);
hipMalloc((void**)&d_c,sizeof(int) * n * n);
hipMalloc((void**)&d_bt,sizeof(int) * n * n);
hipMalloc((void**)&d_data,sizeof(int)*n);
 /****** copy host memory data to device memory ************/
hipMemcpy(d_a,h_a,sizeof(int) * n * n,hipMemcpyHostToDevice);
hipMemcpy(d_b,h_b,sizeof(int) * n * n,hipMemcpyHostToDevice);
std::cout << "" << std::endl;
/*************/
hipLaunchKernelGGL(( zhuanshi), dim3(n),dim3(n), 0, 0, d_b,d_bt,n);
hipLaunchKernelGGL(( chengfa), dim3(n),dim3(n), 0, 0, d_a,d_bt,d_c,d_data,n);
 /***** copy device memory data back to host memory *****/
hipMemcpy(h_bt,d_bt,sizeof(int) * n * n,hipMemcpyDeviceToHost);
hipMemcpy(h_c,d_c,sizeof(int) * n * n,hipMemcpyDeviceToHost);
std::cout << "CPUh_a:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_a[%d][%d] = %d\t",i,j,h_a[n * i + j]);
}
printf("\n");
}
std::cout << "CPUh_b:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_b[%d][%d] = %d\t",i,j,h_b[n * i + j]);
}
printf("\n");
}
std::cout << "CPUh_bt:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_bt[%d][%d] = %d\t",i,j,h_bt[n * i + j]);
}
printf("\n");
}
std::cout << "GPU:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_c[%d][%d] = %d\t",i,j,h_c[n * i + j]);
}
printf("\n");
}
 /******* free memory *********/
free(h_a);
free(h_b);
free(h_c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
std::cout << "" << std::endl;
return 0;
}
| 4cb48e230cb28cf5ac5aad867ed265fa6f613aa8.cu | #include <cuda_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
__global__ void zhuanshi(int *d_b,int *d_bt,int n)
{
int ITdx = threadIdx.x;
int IBdx = blockIdx.x;
d_bt[ITdx * n + IBdx] = d_b[IBdx * n + ITdx];
}
__global__ void neiji(int *d_a,int *d_bt,int *d_c,int *d_data,int ICTdx,int ICBdx,int n)
{
/*
int INTdx = threadIdx.x;
int i = 2, j = 1;
d_data[INTdx] = d_a[ICTdx * n + INTdx] * d_bt[ICBdx * n + INTdx];
__syncthreads();
while(i <= n)
{
if(INTdx % 2 == 0)
{
d_data[INTdx] += d_data[INTdx + j];
}
i *= 2;
j *= 2;
}
d_c[ICTdx * n + ICBdx] = d_data[0];
*/
}
__global__ void chengfa(int *d_a,int *d_bt,int *d_c,int *d_data,int n)
{
/*
int ICTdx = threadIdx.x;
int ICBdx = blockIdx.x;
neiji<<<1,n>>>(d_a,d_bt,d_c,d_data,ICTdx,ICBdx,n);
__syncthreads();
*/
}
int main()
{
 int blag = 1;// flag bit
int n = 0;
 /****** validate the input data ************/
 do{
 std::cout << "Please enter the matrix dimension:" << std::endl;
 std::cin >> n;
 if(n <= 0)
 {
 std::cout << "The matrix dimension you entered is invalid, please re-enter it!" << std::endl;
}else{
blag = 0;
}
}while(blag);
 /******* allocate host memory *********/
int *h_a = (int*)malloc(sizeof(int) * n * n);
int *h_b = (int*)malloc(sizeof(int) * n * n);
int *h_c = (int*)malloc(sizeof(int) * n * n);
int *h_bt = (int*)malloc(sizeof(int) * n * n);
 /******* initialize the host memory data ********/
 srand(time(NULL));// set the random seed
for(int i = 0; i < n * n; ++i)
{
h_a[i] = rand() % 11;
h_b[i] = rand() % 11;
h_c[i] = 0;
h_bt[i] = 0;
}
 /******* allocate device memory *******/
int *d_a,*d_b,*d_c,*d_bt,*d_data;
cudaMalloc((void**)&d_a,sizeof(int) * n * n);
cudaMalloc((void**)&d_b,sizeof(int) * n * n);
cudaMalloc((void**)&d_c,sizeof(int) * n * n);
cudaMalloc((void**)&d_bt,sizeof(int) * n * n);
cudaMalloc((void**)&d_data,sizeof(int)*n);
 /****** copy host memory data to device memory ************/
cudaMemcpy(d_a,h_a,sizeof(int) * n * n,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,sizeof(int) * n * n,cudaMemcpyHostToDevice);
std::cout << "测试点" << std::endl;
/*******执行核函数******/
zhuanshi<<<n,n>>>(d_b,d_bt,n);
chengfa<<<n,n>>>(d_a,d_bt,d_c,d_data,n);
 /***** copy device memory data back to host memory *****/
cudaMemcpy(h_bt,d_bt,sizeof(int) * n * n,cudaMemcpyDeviceToHost);
cudaMemcpy(h_c,d_c,sizeof(int) * n * n,cudaMemcpyDeviceToHost);
std::cout << "CPU内存数据h_a:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_a[%d][%d] = %d\t",i,j,h_a[n * i + j]);
}
printf("\n");
}
std::cout << "CPU内存数据h_b:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_b[%d][%d] = %d\t",i,j,h_b[n * i + j]);
}
printf("\n");
}
std::cout << "CPU内存数据h_bt:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_bt[%d][%d] = %d\t",i,j,h_bt[n * i + j]);
}
printf("\n");
}
std::cout << "GPU内存数据:" << std::endl;
for(int i = 0; i < n; ++i)
{
for(int j = 0; j < n; ++j)
{
printf("h_c[%d][%d] = %d\t",i,j,h_c[n * i + j]);
}
printf("\n");
}
 /******* free memory *********/
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
std::cout << "运行结束" << std::endl;
return 0;
}
|
30f6e01ce9000eb1305b3ef144d5c3532ea32c72.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_histo_kernel_naive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
u_char *Source = NULL;
hipMalloc(&Source, XSIZE*YSIZE);
int *res = NULL;
hipMalloc(&res, XSIZE*YSIZE);
unsigned height = YSIZE;
unsigned width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((gpu_histo_kernel_naive), dim3(gridBlock), dim3(threadBlock), 0, 0, Source, res, height, width);
hipDeviceSynchronize();
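// Warm-up: launch the kernel several more times before the timed loop below so the
// measurement is not skewed by first-launch and cache-warming overhead.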
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((gpu_histo_kernel_naive), dim3(gridBlock), dim3(threadBlock), 0, 0, Source, res, height, width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((gpu_histo_kernel_naive), dim3(gridBlock), dim3(threadBlock), 0, 0, Source, res, height, width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 30f6e01ce9000eb1305b3ef144d5c3532ea32c72.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_histo_kernel_naive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
u_char *Source = NULL;
cudaMalloc(&Source, XSIZE*YSIZE);
int *res = NULL;
cudaMalloc(&res, XSIZE*YSIZE);
unsigned height = YSIZE;
unsigned width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_histo_kernel_naive<<<gridBlock,threadBlock>>>(Source,res,height,width);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_histo_kernel_naive<<<gridBlock,threadBlock>>>(Source,res,height,width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_histo_kernel_naive<<<gridBlock,threadBlock>>>(Source,res,height,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e8eedef97e93ef7b60ac3a0c243d7b51dc798ab0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* 2013
* Maciej Szeptuch
* II UWr
* ----------
 * no shared memory, getting rid of as much memory as possible + loop unrolling;
 * runtimes roughly 10x faster than on the CPU.
word | gpu | cpu | distance
------------------|--------------------------------|--------------------------------|----------
kot | kot [ 13.373088] | kot [ 120.616997] | 0
czesc | czescy [ 17.563328] | czescy [ 182.584000] | 1
onomatopeja | onomatopeja [ 31.341473] | onomatopeja [ 367.598022] | 0
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <errno.h>
#define WORD_MAXLEN 16
#define STEP_0_THREADS 128
#define STEP_R_THREADS 128
#define __CUDA__
#define __CPU__
__device__ __host__ inline unsigned char MIN(const unsigned char a, const unsigned char b) { return a<b?a:b; }
__host__ unsigned char LevenshteinDistanceH(const unsigned char *const A, const unsigned char *const B);
__device__ unsigned char LevenshteinDistanceD(const unsigned char *const A, const unsigned char *const B);
unsigned char *loadDictionary(const char *const file, unsigned int &words, unsigned int &size);
void printHead(void);
#ifdef __CUDA__
__global__ void LevenshteinCUDA_STEP_0(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern, unsigned int *result);
__global__ void LevenshteinCUDA_STEP_R(const unsigned int *from, unsigned int *to, const unsigned int words);
#endif // __CUDA__
#ifdef __CPU__
unsigned int LevenshteinCPU(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern);
#endif // __CPU__
int main(const int argc, const char *const* argv)
{
if(argc < 3)
{
fprintf(stderr, "usage: %s dictionary words...\nError: not enough arguments\n", argv[0]);
return 1;
}
unsigned int dictionarySize = 0,
dictionaryWords = 0;
unsigned char *dictionary = loadDictionary(argv[1], dictionaryWords, dictionarySize);
if(!dictionary)
{
fprintf(stderr, "usage: %s dictionary words...\nError: loading dictionary: %s\n", argv[0], strerror(errno));
return 2;
}
#ifdef __CUDA__
// GPU INIT
unsigned char *cudaDictionary = NULL,
*cudaPattern = NULL;
unsigned int *cudaResult = NULL;
hipMalloc(&cudaDictionary, dictionarySize * sizeof(unsigned char));
hipMemcpy(cudaDictionary, dictionary, dictionarySize * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMalloc(&cudaPattern, WORD_MAXLEN * sizeof(unsigned char));
hipMalloc(&cudaResult, dictionarySize * 2 * sizeof(unsigned int));
#endif // __CUDA__
printHead();
for(unsigned int a = 2; a < argc; ++ a)
{
unsigned int result[2] = {1 << 30, 1 << 30};
unsigned char pattern[WORD_MAXLEN + 2] = {};
memcpy(pattern, argv[a], strlen(argv[a]) * sizeof(unsigned char));
printf(" %-16s | ", pattern);
#ifdef __CUDA__
{
// GPU TEST
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, NULL);
hipMemcpy(cudaPattern, pattern, WORD_MAXLEN * sizeof(unsigned char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( LevenshteinCUDA_STEP_0), dim3((dictionaryWords + STEP_0_THREADS - 1) / STEP_0_THREADS), dim3(STEP_0_THREADS), 0, 0, cudaDictionary, dictionaryWords, cudaPattern, cudaResult);
for(unsigned int size = STEP_R_THREADS; size < dictionaryWords; size <<= 1)
hipLaunchKernelGGL(( LevenshteinCUDA_STEP_R), dim3((dictionaryWords + size - 1) / size), dim3(STEP_R_THREADS), 0, 0, cudaResult, cudaResult, dictionaryWords);
hipMemcpy(result, cudaResult, 2 * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipEventRecord(end, NULL);
hipEventSynchronize(end);
float gputotal = 0;
hipEventElapsedTime(&gputotal, start, end);
            printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], gputotal);
}
#endif // __CUDA__
#ifdef __CPU__
{
// CPU TEST
timeval start, end;
gettimeofday(&start, NULL);
result[0] = LevenshteinCPU(dictionary, dictionaryWords, pattern);
gettimeofday(&end, NULL);
float cputotal = (end.tv_sec - start.tv_sec) * 1000.0f + (end.tv_usec - start.tv_usec) / 1000.0f;
printf("%-16s [%11.6f] | ", dictionary + result[0] * WORD_MAXLEN, cputotal);
}
#endif // __CPU__
printf("%u\n", LevenshteinDistanceH(pattern, dictionary + result[0] * WORD_MAXLEN));
}
#ifdef __CUDA__
hipFree(cudaDictionary);
#endif // __CUDA__
free(dictionary);
return 0;
}
unsigned char *loadDictionary(const char *const file, unsigned int &words, unsigned int &size)
{
FILE *handle = fopen(file, "rb");
if(!handle)
return NULL;
unsigned char *dictionary = NULL,
*current = NULL;
char buffer[64] = {};
words = 0;
while(fgets(buffer, 64, handle))
++ words;
fseek(handle, 0, SEEK_SET);
size = words * WORD_MAXLEN;
current = dictionary = new unsigned char[size];
memset(dictionary, 0, size * sizeof(unsigned char));
while(fgets((char *) current, WORD_MAXLEN + 8, handle))
{
current[strlen((const char *) current) - 1] = 0; // remove \n
current[strlen((const char *) current) - 1] = 0; // remove \r
current += WORD_MAXLEN;
}
fclose(handle);
return dictionary;
}
#ifdef __CPU__
unsigned int LevenshteinCPU(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern)
{
const unsigned char *word = dictionary;
unsigned int best = 1 << 30,
r = 0;
for(unsigned int w = 0; w < words; ++ w, word += WORD_MAXLEN)
{
unsigned int dist = LevenshteinDistanceH(pattern, word);
if(dist < best)
{
best = dist;
r = w;
}
}
return r;
}
#endif // __CPU__
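// Two-row dynamic-programming edit distance: temp[t] is the row being filled for the
// current character of A and temp[t ^ 1] holds the previous row; t flips after every
// character, so only O(WORD_MAXLEN) storage is needed instead of a full DP table.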
__host__ unsigned char LevenshteinDistanceH(const unsigned char *const A, const unsigned char *const B)
{
unsigned char sb = strlen((const char *) B);
unsigned char *AA = (unsigned char *) A,
*BB = (unsigned char *) B;
unsigned char temp[2][WORD_MAXLEN + 1];
unsigned char t = 1;
for(unsigned char a = 0; a <= sb; ++ a)
temp[0][a] = a;
AA = (unsigned char *) A;
for(unsigned char a = 1; *AA > 0; ++ a, t ^= 1, ++ AA)
{
temp[t][0] = a;
BB = (unsigned char *) B;
for(unsigned char b = 1; b <= sb; ++ b, ++ BB)
temp[t][b] = MIN(temp[t ^ 1][ b ] + 1,
MIN(temp[ t ][b - 1] + 1,
temp[t ^ 1][b - 1] + (*AA != *BB)));
}
return temp[t ^ 1][sb];
}
__device__ unsigned char LevenshteinDistanceD(const unsigned char *const A, const unsigned char *const B)
{
unsigned char sb = 0;
unsigned char *AA = (unsigned char *) A,
*BB = (unsigned char *) B;
while(*BB ++ > 0)
++ sb;
unsigned char temp[2][WORD_MAXLEN + 1];
unsigned char t = 1;
#pragma unroll
for(unsigned char a = 0; a <= WORD_MAXLEN; ++ a)
temp[0][a] = a;
for(unsigned char a = 1; *AA > 0; ++ a, t ^= 1, ++ AA)
{
temp[t][0] = a;
BB = (unsigned char *) B;
#pragma unroll
for(unsigned char b = 1; b <= WORD_MAXLEN; ++ b, ++ BB)
temp[t][b] = MIN(temp[t ^ 1][ b ] + 1,
MIN(temp[ t ][b - 1] + 1,
temp[t ^ 1][b - 1] + (*AA != *BB)));
}
return temp[t ^ 1][sb];
}
void printHead(void)
{
printf(" word | ");
#ifdef __CUDA__
printf(" gpu | ");
#endif // __CUDA__
#ifdef __CPU__
printf(" cpu | ");
#endif // __CPU__
printf("distance\n");
printf("------------------|-");
#ifdef __CUDA__
printf("-------------------------------|-");
#endif // __CUDA__
#ifdef __CPU__
printf("-------------------------------|-");
#endif // __CPU__
printf("---------\n");
}
#ifdef __CUDA__
__global__ void LevenshteinCUDA_STEP_0(const unsigned char *dictionary, const unsigned int words, const unsigned char *pattern, unsigned int *result)
{
int word = blockIdx.x * STEP_0_THREADS + threadIdx.x;
if(word >= words)
return;
result[word * 2] = word;
result[word * 2 + 1] = LevenshteinDistanceD(pattern, dictionary + word * WORD_MAXLEN);
}
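// Block-level min-reduction over (word index, distance) pairs: each block loads
// STEP_R_THREADS pairs into shared memory, keeps the pair with the smallest distance at
// local_data[0..1], and thread 0 writes it back to slot blockIdx.x. The host relaunches
// this kernel with a doubled stride until a single best pair remains at index 0.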
__global__ void LevenshteinCUDA_STEP_R(const unsigned int *from, unsigned int *to, const unsigned int words)
{
__shared__ unsigned int local_data[STEP_R_THREADS * 2];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
local_data[tid * 2] = from[i * 2];
local_data[tid * 2 + 1] = from[i * 2 + 1];
__syncthreads();
for(unsigned int s = 1; s < blockDim.x && tid + s < words; s <<= 1)
{
if(tid % (2 * s) == 0 && local_data[tid * 2 + 1] > local_data[(tid + s) * 2 + 1])
{
local_data[tid * 2] = local_data[(tid + s) * 2];
local_data[tid * 2 + 1] = local_data[(tid + s) * 2 + 1];
}
__syncthreads();
}
if(tid == 0)
{
to[blockIdx.x * 2] = local_data[0];
to[blockIdx.x * 2 + 1] = local_data[1];
}
}
#endif // __CUDA__
| e8eedef97e93ef7b60ac3a0c243d7b51dc798ab0.cu | /* 2013
* Maciej Szeptuch
* II UWr
* ----------
 * no shared memory, getting rid of as much memory as possible + loop unrolling;
 * runtimes roughly 10x faster than on the CPU.
word | gpu | cpu | distance
------------------|--------------------------------|--------------------------------|----------
kot | kot [ 13.373088] | kot [ 120.616997] | 0
czesc | czescy [ 17.563328] | czescy [ 182.584000] | 1
onomatopeja | onomatopeja [ 31.341473] | onomatopeja [ 367.598022] | 0
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <errno.h>
#define WORD_MAXLEN 16
#define STEP_0_THREADS 128
#define STEP_R_THREADS 128
#define __CUDA__
#define __CPU__
__device__ __host__ inline unsigned char MIN(const unsigned char a, const unsigned char b) { return a<b?a:b; }
__host__ unsigned char LevenshteinDistanceH(const unsigned char *const A, const unsigned char *const B);
__device__ unsigned char LevenshteinDistanceD(const unsigned char *const A, const unsigned char *const B);
unsigned char *loadDictionary(const char *const file, unsigned int &words, unsigned int &size);
void printHead(void);
#ifdef __CUDA__
__global__ void LevenshteinCUDA_STEP_0(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern, unsigned int *result);
__global__ void LevenshteinCUDA_STEP_R(const unsigned int *from, unsigned int *to, const unsigned int words);
#endif // __CUDA__
#ifdef __CPU__
unsigned int LevenshteinCPU(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern);
#endif // __CPU__
int main(const int argc, const char *const* argv)
{
if(argc < 3)
{
fprintf(stderr, "usage: %s dictionary words...\nError: not enough arguments\n", argv[0]);
return 1;
}
unsigned int dictionarySize = 0,
dictionaryWords = 0;
unsigned char *dictionary = loadDictionary(argv[1], dictionaryWords, dictionarySize);
if(!dictionary)
{
fprintf(stderr, "usage: %s dictionary words...\nError: loading dictionary: %s\n", argv[0], strerror(errno));
return 2;
}
#ifdef __CUDA__
// GPU INIT
unsigned char *cudaDictionary = NULL,
*cudaPattern = NULL;
unsigned int *cudaResult = NULL;
cudaMalloc(&cudaDictionary, dictionarySize * sizeof(unsigned char));
cudaMemcpy(cudaDictionary, dictionary, dictionarySize * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMalloc(&cudaPattern, WORD_MAXLEN * sizeof(unsigned char));
cudaMalloc(&cudaResult, dictionarySize * 2 * sizeof(unsigned int));
#endif // __CUDA__
printHead();
for(unsigned int a = 2; a < argc; ++ a)
{
unsigned int result[2] = {1 << 30, 1 << 30};
unsigned char pattern[WORD_MAXLEN + 2] = {};
memcpy(pattern, argv[a], strlen(argv[a]) * sizeof(unsigned char));
printf(" %-16s | ", pattern);
#ifdef __CUDA__
{
// GPU TEST
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, NULL);
cudaMemcpy(cudaPattern, pattern, WORD_MAXLEN * sizeof(unsigned char), cudaMemcpyHostToDevice);
LevenshteinCUDA_STEP_0<<<(dictionaryWords + STEP_0_THREADS - 1) / STEP_0_THREADS, STEP_0_THREADS>>> (cudaDictionary, dictionaryWords, cudaPattern, cudaResult);
for(unsigned int size = STEP_R_THREADS; size < dictionaryWords; size <<= 1)
LevenshteinCUDA_STEP_R<<<(dictionaryWords + size - 1) / size, STEP_R_THREADS>>> (cudaResult, cudaResult, dictionaryWords);
cudaMemcpy(result, cudaResult, 2 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaEventRecord(end, NULL);
cudaEventSynchronize(end);
float gputotal = 0;
cudaEventElapsedTime(&gputotal, start, end);
            printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], gputotal);
}
#endif // __CUDA__
#ifdef __CPU__
{
// CPU TEST
timeval start, end;
gettimeofday(&start, NULL);
result[0] = LevenshteinCPU(dictionary, dictionaryWords, pattern);
gettimeofday(&end, NULL);
float cputotal = (end.tv_sec - start.tv_sec) * 1000.0f + (end.tv_usec - start.tv_usec) / 1000.0f;
printf("%-16s [%11.6f] | ", dictionary + result[0] * WORD_MAXLEN, cputotal);
}
#endif // __CPU__
printf("%u\n", LevenshteinDistanceH(pattern, dictionary + result[0] * WORD_MAXLEN));
}
#ifdef __CUDA__
cudaFree(cudaDictionary);
#endif // __CUDA__
free(dictionary);
return 0;
}
unsigned char *loadDictionary(const char *const file, unsigned int &words, unsigned int &size)
{
FILE *handle = fopen(file, "rb");
if(!handle)
return NULL;
unsigned char *dictionary = NULL,
*current = NULL;
char buffer[64] = {};
words = 0;
while(fgets(buffer, 64, handle))
++ words;
fseek(handle, 0, SEEK_SET);
size = words * WORD_MAXLEN;
current = dictionary = new unsigned char[size];
memset(dictionary, 0, size * sizeof(unsigned char));
while(fgets((char *) current, WORD_MAXLEN + 8, handle))
{
current[strlen((const char *) current) - 1] = 0; // remove \n
current[strlen((const char *) current) - 1] = 0; // remove \r
current += WORD_MAXLEN;
}
fclose(handle);
return dictionary;
}
#ifdef __CPU__
unsigned int LevenshteinCPU(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern)
{
const unsigned char *word = dictionary;
unsigned int best = 1 << 30,
r = 0;
for(unsigned int w = 0; w < words; ++ w, word += WORD_MAXLEN)
{
unsigned int dist = LevenshteinDistanceH(pattern, word);
if(dist < best)
{
best = dist;
r = w;
}
}
return r;
}
#endif // __CPU__
__host__ unsigned char LevenshteinDistanceH(const unsigned char *const A, const unsigned char *const B)
{
unsigned char sb = strlen((const char *) B);
unsigned char *AA = (unsigned char *) A,
*BB = (unsigned char *) B;
unsigned char temp[2][WORD_MAXLEN + 1];
unsigned char t = 1;
for(unsigned char a = 0; a <= sb; ++ a)
temp[0][a] = a;
AA = (unsigned char *) A;
for(unsigned char a = 1; *AA > 0; ++ a, t ^= 1, ++ AA)
{
temp[t][0] = a;
BB = (unsigned char *) B;
for(unsigned char b = 1; b <= sb; ++ b, ++ BB)
temp[t][b] = MIN(temp[t ^ 1][ b ] + 1,
MIN(temp[ t ][b - 1] + 1,
temp[t ^ 1][b - 1] + (*AA != *BB)));
}
return temp[t ^ 1][sb];
}
__device__ unsigned char LevenshteinDistanceD(const unsigned char *const A, const unsigned char *const B)
{
unsigned char sb = 0;
unsigned char *AA = (unsigned char *) A,
*BB = (unsigned char *) B;
while(*BB ++ > 0)
++ sb;
unsigned char temp[2][WORD_MAXLEN + 1];
unsigned char t = 1;
#pragma unroll
for(unsigned char a = 0; a <= WORD_MAXLEN; ++ a)
temp[0][a] = a;
for(unsigned char a = 1; *AA > 0; ++ a, t ^= 1, ++ AA)
{
temp[t][0] = a;
BB = (unsigned char *) B;
#pragma unroll
for(unsigned char b = 1; b <= WORD_MAXLEN; ++ b, ++ BB)
temp[t][b] = MIN(temp[t ^ 1][ b ] + 1,
MIN(temp[ t ][b - 1] + 1,
temp[t ^ 1][b - 1] + (*AA != *BB)));
}
return temp[t ^ 1][sb];
}
void printHead(void)
{
printf(" word | ");
#ifdef __CUDA__
printf(" gpu | ");
#endif // __CUDA__
#ifdef __CPU__
printf(" cpu | ");
#endif // __CPU__
printf("distance\n");
printf("------------------|-");
#ifdef __CUDA__
printf("-------------------------------|-");
#endif // __CUDA__
#ifdef __CPU__
printf("-------------------------------|-");
#endif // __CPU__
printf("---------\n");
}
#ifdef __CUDA__
__global__ void LevenshteinCUDA_STEP_0(const unsigned char *dictionary, const unsigned int words, const unsigned char *pattern, unsigned int *result)
{
int word = blockIdx.x * STEP_0_THREADS + threadIdx.x;
if(word >= words)
return;
result[word * 2] = word;
result[word * 2 + 1] = LevenshteinDistanceD(pattern, dictionary + word * WORD_MAXLEN);
}
__global__ void LevenshteinCUDA_STEP_R(const unsigned int *from, unsigned int *to, const unsigned int words)
{
__shared__ unsigned int local_data[STEP_R_THREADS * 2];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
local_data[tid * 2] = from[i * 2];
local_data[tid * 2 + 1] = from[i * 2 + 1];
__syncthreads();
for(unsigned int s = 1; s < blockDim.x && tid + s < words; s <<= 1)
{
if(tid % (2 * s) == 0 && local_data[tid * 2 + 1] > local_data[(tid + s) * 2 + 1])
{
local_data[tid * 2] = local_data[(tid + s) * 2];
local_data[tid * 2 + 1] = local_data[(tid + s) * 2 + 1];
}
__syncthreads();
}
if(tid == 0)
{
to[blockIdx.x * 2] = local_data[0];
to[blockIdx.x * 2 + 1] = local_data[1];
}
}
#endif // __CUDA__
|
361ca6177c6ee6aeec81e98ddf358ff3fe1dabd0.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
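// Forward pass: each of the num_ outputs reduces one contiguous dim_-long slice of the
// bottom blob. SUM (and MEAN, whose 1/dim_ factor is presumably folded into coeff_ at
// setup) is computed as a dot product with the ones vector sum_multiplier_, ASUM as an
// absolute sum, and SUMSQ as the dot product of the slice with itself; the result is
// finally scaled by coeff_.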
template<typename Dtype>
void ReductionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* mult_data = NULL;
int_tp bottom_data_off = 0;
int_tp top_data_off = 0;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (sum_multiplier_.count() > 0) {
mult_data = sum_multiplier_.gpu_data();
}
Dtype* top_data = top[0]->mutable_cpu_data();
for (int_tp i = 0; i < num_; ++i) {
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_dot(dim_, mult_data, bottom_data + bottom_data_off,
top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_asum(dim_, bottom_data + bottom_data_off,
top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_dot(dim_, bottom_data + bottom_data_off,
bottom_data + bottom_data_off, top_data + top_data_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
++top_data_off;
}
if (coeff_ != Dtype(1)) {
// Reset the top_data pointer.
top_data = top[0]->mutable_gpu_data();
caffe_gpu_scal(num_, coeff_, top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
if (sum_multiplier_.count() > 0) {
mult_data = sum_multiplier_.gpu_data();
}
Dtype* top_data = top[0]->mutable_cpu_data();
for (int_tp i = 0; i < num_; ++i) {
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
greentea_gpu_dot<Dtype>(this->device_->id(), dim_,
(cl_mem) mult_data, 0, (cl_mem) bottom_data,
bottom_data_off, top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_ASUM:
greentea_gpu_asum<Dtype>(this->device_->id(), dim_,
(cl_mem) bottom_data, bottom_data_off,
top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
greentea_gpu_dot<Dtype>(this->device_->id(), dim_,
(cl_mem) bottom_data, bottom_data_off,
(cl_mem) bottom_data, bottom_data_off,
top_data + top_data_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
++top_data_off;
}
if (coeff_ != Dtype(1)) {
// Reset the top_data pointer.
top_data = top[0]->mutable_gpu_data();
greentea_gpu_scal<Dtype>(this->device_->id(), num_, coeff_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
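// Backward pass: the upstream scalar gradient for each slice (top_diff * coeff_) is
// broadcast back over its dim_ inputs -- unchanged for SUM/MEAN, multiplied by sign(x)
// for ASUM, and by 2*x for SUMSQ.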
template<typename Dtype>
void ReductionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
// Get bottom_data, if needed.
const Dtype* bottom_data = NULL;
switch (op_) {
// Operations that don't need bottom_data
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
break;
// Operations that need bottom_data
case ReductionParameter_ReductionOp_ASUM:
case ReductionParameter_ReductionOp_SUMSQ:
bottom_data = bottom[0]->gpu_data();
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int_tp bottom_data_off = 0;
int_tp bottom_diff_off = 0;
int_tp top_diff_off = 0;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
for (int_tp i = 0; i < num_; ++i) {
const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_;
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_set(dim_, bottom_coeff, bottom_diff + bottom_diff_off);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_sign(dim_, bottom_data + bottom_data_off,
bottom_diff + bottom_diff_off);
caffe_gpu_scal(dim_, bottom_coeff, bottom_diff + bottom_diff_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data + bottom_data_off,
bottom_diff + bottom_diff_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
bottom_diff_off += dim_;
++top_diff_off;
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
for (int_tp i = 0; i < num_; ++i) {
const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_;
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
greentea_gpu_set<Dtype>(this->device_->id(), dim_,
bottom_coeff, (cl_mem) bottom_diff,
bottom_diff_off);
break;
case ReductionParameter_ReductionOp_ASUM:
greentea_gpu_sign<Dtype>(this->device_->id(), dim_,
(cl_mem) bottom_data, bottom_data_off,
(cl_mem) bottom_diff, bottom_diff_off);
greentea_gpu_scal<Dtype>(this->device_->id(), dim_,
bottom_coeff, (cl_mem) bottom_diff,
bottom_diff_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
greentea_gpu_scale<Dtype>(this->device_->id(), dim_,
2 * bottom_coeff, (cl_mem) bottom_data,
bottom_data_off, (cl_mem) bottom_diff,
bottom_diff_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
bottom_diff_off += dim_;
++top_diff_off;
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReductionLayer);
} // namespace caffe
| 361ca6177c6ee6aeec81e98ddf358ff3fe1dabd0.cu | #include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void ReductionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* mult_data = NULL;
int_tp bottom_data_off = 0;
int_tp top_data_off = 0;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (sum_multiplier_.count() > 0) {
mult_data = sum_multiplier_.gpu_data();
}
Dtype* top_data = top[0]->mutable_cpu_data();
for (int_tp i = 0; i < num_; ++i) {
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_dot(dim_, mult_data, bottom_data + bottom_data_off,
top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_asum(dim_, bottom_data + bottom_data_off,
top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_dot(dim_, bottom_data + bottom_data_off,
bottom_data + bottom_data_off, top_data + top_data_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
++top_data_off;
}
if (coeff_ != Dtype(1)) {
// Reset the top_data pointer.
top_data = top[0]->mutable_gpu_data();
caffe_gpu_scal(num_, coeff_, top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
if (sum_multiplier_.count() > 0) {
mult_data = sum_multiplier_.gpu_data();
}
Dtype* top_data = top[0]->mutable_cpu_data();
for (int_tp i = 0; i < num_; ++i) {
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
greentea_gpu_dot<Dtype>(this->device_->id(), dim_,
(cl_mem) mult_data, 0, (cl_mem) bottom_data,
bottom_data_off, top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_ASUM:
greentea_gpu_asum<Dtype>(this->device_->id(), dim_,
(cl_mem) bottom_data, bottom_data_off,
top_data + top_data_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
greentea_gpu_dot<Dtype>(this->device_->id(), dim_,
(cl_mem) bottom_data, bottom_data_off,
(cl_mem) bottom_data, bottom_data_off,
top_data + top_data_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
++top_data_off;
}
if (coeff_ != Dtype(1)) {
// Reset the top_data pointer.
top_data = top[0]->mutable_gpu_data();
greentea_gpu_scal<Dtype>(this->device_->id(), num_, coeff_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void ReductionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
// Get bottom_data, if needed.
const Dtype* bottom_data = NULL;
switch (op_) {
// Operations that don't need bottom_data
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
break;
// Operations that need bottom_data
case ReductionParameter_ReductionOp_ASUM:
case ReductionParameter_ReductionOp_SUMSQ:
bottom_data = bottom[0]->gpu_data();
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int_tp bottom_data_off = 0;
int_tp bottom_diff_off = 0;
int_tp top_diff_off = 0;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
for (int_tp i = 0; i < num_; ++i) {
const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_;
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_set(dim_, bottom_coeff, bottom_diff + bottom_diff_off);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_sign(dim_, bottom_data + bottom_data_off,
bottom_diff + bottom_diff_off);
caffe_gpu_scal(dim_, bottom_coeff, bottom_diff + bottom_diff_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data + bottom_data_off,
bottom_diff + bottom_diff_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
bottom_diff_off += dim_;
++top_diff_off;
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
for (int_tp i = 0; i < num_; ++i) {
const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_;
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
greentea_gpu_set<Dtype>(this->device_->id(), dim_,
bottom_coeff, (cl_mem) bottom_diff,
bottom_diff_off);
break;
case ReductionParameter_ReductionOp_ASUM:
greentea_gpu_sign<Dtype>(this->device_->id(), dim_,
(cl_mem) bottom_data, bottom_data_off,
(cl_mem) bottom_diff, bottom_diff_off);
greentea_gpu_scal<Dtype>(this->device_->id(), dim_,
bottom_coeff, (cl_mem) bottom_diff,
bottom_diff_off);
break;
case ReductionParameter_ReductionOp_SUMSQ:
greentea_gpu_scale<Dtype>(this->device_->id(), dim_,
2 * bottom_coeff, (cl_mem) bottom_data,
bottom_data_off, (cl_mem) bottom_diff,
bottom_diff_off);
break;
default:
LOG(FATAL)<< "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data_off += dim_;
bottom_diff_off += dim_;
++top_diff_off;
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReductionLayer);
} // namespace caffe
|
cf567bfcd93fc04f0ba4e9813c483dfd1c10b55b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from each of the 20 CUDA threads (2 blocks of 10 threads) running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
helloFromGPU << <2, 10 >> > ();
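	// hipDeviceReset() below waits for the device to finish (an implicit synchronization),
	// so the kernel's printf output is flushed before the host program exits.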
hipDeviceReset();
return 0;
}
| cf567bfcd93fc04f0ba4e9813c483dfd1c10b55b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from each of the 20 CUDA threads (2 blocks of 10 threads) running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
helloFromGPU << <2, 10 >> > ();
cudaDeviceReset();
return 0;
}
|
875b1bff42104eb409288c8a8343590802429c08.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
using namespace glm;
#if TORCH_HIP_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
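//Camera basis used below: A = view x up is the image-plane "right" axis, B = A x view the
//corrected "up" axis, M = eye + view the center of the image plane, and H/V are half-extent
//vectors scaled by tan(fov.x)/tan(fov.y); the pixel's NDC coordinates in [0,1] then select
//a point on that plane and the ray is shot from the eye through it.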
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
//vec3 jitter = 2.0f*generateRandomNumberFromThread(resolution, time, x, y);
vec3 jitter = vec3(0,0,0); //no antialiasing
float NDCx = ((float)x +jitter.x)/resolution.x;
float NDCy = ((float)y +jitter.y )/resolution.y;
//float NDCx = ((float)x )/resolution.x;
//float NDCy = ((float)y )/resolution.y;
vec3 A = cross(view, up);
vec3 B = cross(A, view);
vec3 M = eye+view;
vec3 V = B * (1.0f/length(B)) * length(view)*tan(radians(fov.y));
vec3 H = A * (1.0f/length(A)) * length(view)*tan(radians(fov.x));
vec3 point = M + (2*NDCx -1)*H + (1-2*NDCy)*V;
ray r;
r.origin = eye;
r.direction = normalize(point-eye);
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, float iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x/iterations*255.0;
color.y = image[index].y/iterations*255.0;
color.z = image[index].z/iterations*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
__global__ void raytraceRay(ray* cudarays, glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, int numberOfCubes, int numberOfSpheres, material* cudamaterials,
int numberOfMaterials, int numBounce, int* cudaalive, int initialMaxRays){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int y = cudarays[index].pixelID/resolution.y;
int x = cudarays[index].pixelID-resolution.x*y;
if (index < initialMaxRays){
float tempLength, closest = 1e26, indexOfRefraction = 0;
int closestObjectid = -1;
vec3 tempIntersectionPoint = vec3(0,0,0), tempNormal = vec3(0,0,0), normal = vec3(0,0,0), intersectionPoint = vec3(0,0,0);
vec3 objectColor = vec3(0,0,0), specColor = vec3(0,0,0);
    float specExponent = 0;
    bool isReflective = false, isRefractive = false;
bool inside = false, tempInside = false;
//input text file must load cubes first before loading spheres
for (int i = 0; i < numberOfCubes; i++){
if(geoms[i].type == CUBE){
tempLength = boxIntersectionTest( geoms[i], cudarays[index], tempIntersectionPoint, tempNormal, tempInside);
}
if (tempLength < closest && tempLength >= 0){
closest = tempLength;
normal = tempNormal;
intersectionPoint = tempIntersectionPoint;
closestObjectid = i;
inside = tempInside;
}
}
for(int i = numberOfCubes; i < numberOfGeoms; i++){
if(geoms[i].type == SPHERE){
tempLength = sphereIntersectionTest( geoms[i], cudarays[index], tempIntersectionPoint, tempNormal, tempInside);
}
if (tempLength < closest && tempLength >= 0){
closest = tempLength;
normal = tempNormal;
intersectionPoint = tempIntersectionPoint;
closestObjectid = i;
inside = tempInside;
}
}
if (closest < 1e26 && closest >= 0){
objectColor = cudamaterials[geoms[closestObjectid].materialid].color;
specExponent = cudamaterials[geoms[closestObjectid].materialid].specularExponent;
specColor = cudamaterials[geoms[closestObjectid].materialid].specularColor;
isReflective = cudamaterials[geoms[closestObjectid].materialid].hasReflective;
isRefractive = cudamaterials[geoms[closestObjectid].materialid].hasRefractive;
indexOfRefraction = cudamaterials[geoms[closestObjectid].materialid].indexOfRefraction;
vec3 reflectedDir = cudarays[index].direction - vec3(2*vec4(normal*(dot(cudarays[index].direction,normal)),0));
reflectedDir = normalize(reflectedDir);
vec3 refractedDir = vec3(0,0,0);
if (cudamaterials[geoms[closestObjectid].materialid].emittance > 0){
cudarays[index].color *= cudamaterials[geoms[closestObjectid].materialid].color*cudamaterials[geoms[closestObjectid].materialid].emittance;
cudarays[index].alive = false;
colors[cudarays[index].pixelID] += cudarays[index].color;
cudaalive[index] = 0; //dead
return;
}
float n1 = 0, n2 = 0;
float costheta_i = 0; float costheta_t = 0;
float sin2theta_t = 0;
float R = 0;
bool TIR = false;
float schlicksR = 0;
float random = 0;
if (isRefractive){
//graphics.stanford.edu/courses/cs148-10-summer/docs/2006--degreve--reflection_refraction.pdf
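        //Following the reference above: when the ray starts inside the object, n1/n2 are
        //swapped and the normal flipped; total internal reflection is detected when
        //sin^2(theta_t) > 1, otherwise Snell's law gives the refracted direction.
        //Schlick's approximation R = R0 + (1 - R0)(1 - cos(theta))^5 estimates the Fresnel
        //reflectance, and a uniform random number chooses reflection vs refraction.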
if (inside){
n1 = indexOfRefraction;
n2 = 1.0f;
normal = -normal;
}else{
n1 = 1.0f;
n2 = indexOfRefraction;
}
costheta_i = glm::dot(-1.0f*cudarays[index].direction, normal);
sin2theta_t = pow(n1/n2,2)*(1-pow(costheta_i,2));
R = pow((n1-n2)/(n1+n2),2);
if (sin2theta_t > 1){
TIR = true;
}else{
costheta_t = sqrt(1-sin2theta_t);
refractedDir = (n1/n2)*cudarays[index].direction + ((n1/n2)*costheta_i - sqrt(1-sin2theta_t))*normal;
}
if (n1 <= n2){
schlicksR = R + (1-R)*(1-costheta_i)*(1-costheta_i)*(1-costheta_i)*(1-costheta_i)*(1-costheta_i);
}else if (n1 > n2 && !TIR){
schlicksR = R + (1-R)*(1-costheta_t)*(1-costheta_t)*(1-costheta_t)*(1-costheta_t)*(1-costheta_t);
}else{
schlicksR = 1;
}
thrust::default_random_engine rng(hash((cudarays[index].pixelID)*time));
thrust::uniform_real_distribution<float> u01(0,1);
random = (float) u01(rng);
cudarays[index].origin = intersectionPoint+0.01f*refractedDir;
cudarays[index].direction = refractedDir;
if (random <= schlicksR){
cudarays[index].origin = intersectionPoint+0.0001f*reflectedDir;
cudarays[index].direction = reflectedDir;
}
}else if (isReflective){
cudarays[index].origin = intersectionPoint+0.01f*reflectedDir;
cudarays[index].direction = reflectedDir;
}else{ //just diffuse
//vec3 rand = generateRandomNumberFromThread(resolution, time*(numBounce+1), x, y);
thrust::default_random_engine rng1(hash(time*(numBounce + 1)* index));
thrust::uniform_real_distribution<float> u02(0,time);
thrust::default_random_engine rng(hash((float)u02(rng1)*(numBounce + 1)* index));
thrust::uniform_real_distribution<float> u01(0,1);
if((float)u01(rng) < 0.1) // russian roulette rule: ray is absorbed
{
cudarays[index].color *= cudamaterials[geoms[closestObjectid].materialid].color*cudamaterials[geoms[closestObjectid].materialid].emittance;
cudarays[index].alive = false;
colors[cudarays[index].pixelID] += cudarays[index].color;
cudaalive[index] = 0; //dead
return;
}
else
{
vec3 outgoingDir = calculateRandomDirectionInHemisphere(normal, (float)u01(rng), (float)u01(rng));
cudarays[index].origin = intersectionPoint+0.01f*outgoingDir;
cudarays[index].direction = outgoingDir;
}
/*vec3 outgoingDir = calculateRandomDirectionInHemisphere(normal, (float)u01(rng), (float)u01(rng));
cudarays[index].origin = intersectionPoint+0.001f*outgoingDir;
cudarays[index].direction = outgoingDir;*/
}
cudarays[index].color *= objectColor;
}//if intersects with anything
else{
cudarays[index].color *= vec3(0,0,0);
cudarays[index].alive = false;
colors[cudarays[index].pixelID] += cudarays[index].color;
cudaalive[index] = 0; //dead
//numAliveRays[0]--;
return;
}
}//end of ifstatement
}
//INITIALIZES A POOL OF RAYS
__global__ void initializeRays(glm::vec2 resolution, float time, cameraData cam, ray* cudarays){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if((x<=resolution.x && y<=resolution.y)){
ray rayFromCamera = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
//find aim point
vec3 aimPoint = rayFromCamera.origin + cam.focalLength*rayFromCamera.direction;
//jittered ray (DOF)
float degOfJitter = 1;
vec3 jitter = generateRandomNumberFromThread(resolution, time, x, y);
ray jitteredRay;
jitteredRay.origin = vec3(rayFromCamera.origin.x+degOfJitter*jitter.x, rayFromCamera.origin.y+degOfJitter*jitter.y, rayFromCamera.origin.z);
jitteredRay.direction = normalize(aimPoint-jitteredRay.origin);
ray currentRay = rayFromCamera; //jitteredRay;
currentRay.pixelID = index;
currentRay.color = vec3(1,1,1);
currentRay.alive = true;
cudarays[index] = currentRay; //stores ray
}
}
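//Naive (Hillis-Steele style) inclusive scan step over the ray-alive flags: pass d adds the
//element 2^(d-1) slots to the left. The host runs ceil(log2(numRays)) passes, swapping the
//two buffers after each one, so the result holds prefix sums that serve as 1-based
//destination indices for stream compaction.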
__global__ void scan(int* cudacondition, int* cudatemp, int d){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index == 0)
cudatemp[0] = cudacondition[0];
    int e = 1 << (d - 1); // stride for this scan pass (2^(d-1))
if (index >= e){
cudatemp[index] = cudacondition[index-e] + cudacondition[index];
}else{
cudatemp[index] = cudacondition[index];
}
}
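//Scatter phase of stream compaction: each ray that is still alive is copied to slot
//(prefix_sum - 1) of the temporary ray buffer, packing the surviving rays contiguously.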
__global__ void streamCompact( int* cudaalive, ray* cudarays, ray* cudaraysTemp, int numRays){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < numRays){
if(cudarays[index].alive){ //compare to see if ray is alive or dead
cudaraysTemp[cudaalive[index]-1] = cudarays[index];
}
}
}
__global__ void resetAliveConditionArray( int* cudaalive){
int index = blockIdx.x * blockDim.x + threadIdx.x;
cudaalive[index] = 1;
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, int numberOfCubes, int numberOfSpheres, bool cameraMoved){
int traceDepth = 200; //determines how many bounces the pathtracer traces
std::vector<int> lightsid;
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock2d(tileSize, tileSize);
dim3 fullBlocksPerGrid2d((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
dim3 threadsPerBlock1d(tileSize*tileSize);
float s = renderCam->resolution.x*renderCam->resolution.y;
dim3 fullBlocksPerGrid1d((int)ceil((float(renderCam->resolution.x)/float(tileSize))*(float(renderCam->resolution.y)/float(tileSize))));
//send image to GPU
glm::vec3* cudaimage = NULL;
hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
//package geometry and materials and sent to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
material* cudamaterials = NULL;
hipMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
hipMemcpy( cudamaterials, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);
int numberOfPixels = renderCam->resolution.x*renderCam->resolution.y;
ray* cudarays = NULL;
hipMalloc((void**)&cudarays, numberOfPixels*sizeof(ray));
//package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
cam.focalLength = renderCam->focalLengths[frame];
//clear image
if (cameraMoved)
hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid2d), dim3(threadsPerBlock2d), 0, 0, renderCam->resolution,cudaimage);
if (numberOfGeoms != numberOfCubes+numberOfSpheres){
std::cout<<"ERROR numberOfGeoms != numberOfCubes+numberOfSpheres"<<std::endl;
std::cout<<numberOfGeoms<<", "<<numberOfCubes<<", "<<numberOfSpheres<<std::endl;
}
//initial pool of rays
hipLaunchKernelGGL(( initializeRays), dim3(fullBlocksPerGrid2d), dim3(threadsPerBlock2d), 0, 0, renderCam->resolution, (float)iterations, cam, cudarays);
//intialize the alive array
int* cudaalive = NULL;
hipMalloc((void**)&cudaalive, numberOfPixels*sizeof(int));
hipLaunchKernelGGL(( resetAliveConditionArray), dim3(fullBlocksPerGrid1d), dim3(threadsPerBlock1d), 0, 0, cudaalive);
int* cudatemp = NULL;
hipMalloc((void**)&cudatemp, numberOfPixels*sizeof(int));
ray* cudaraysTemp = NULL;
hipMalloc((void**)&cudaraysTemp, numberOfPixels*sizeof(ray));
int numRays = renderCam->resolution.x*renderCam->resolution.y;
//kernel launches
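  //Iterative path tracing: each pass traces one bounce for every live ray, scans the alive
  //flags, compacts the ray pool into cudaraysTemp (then swaps the buffers), and shrinks the
  //next launch to the number of surviving rays.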
for (int i = 0; i < traceDepth && numRays > 0; i++){
hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid1d), dim3(threadsPerBlock1d), 0, 0, cudarays, renderCam->resolution, (float)iterations, cam, traceDepth,
cudaimage, cudageoms, numberOfGeoms, numberOfCubes, numberOfSpheres, cudamaterials, numberOfMaterials, i, cudaalive, numRays);
int log2n = (int)ceil(log(float(numRays)) / log(2.0f));
for (int d = 1; d <= log2n; d++){
hipLaunchKernelGGL(( scan), dim3(fullBlocksPerGrid1d), dim3(threadsPerBlock1d), 0, 0, cudaalive, cudatemp, d); //scan
int* temp = cudaalive;
cudaalive = cudatemp;
cudatemp = temp;
}
int numAliveRaysCPU = 0;
//cudaalive now has the summed corresponding new indices for the alive rays
hipMemcpy(&numAliveRaysCPU, &cudaalive[numRays-1], sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( streamCompact), dim3(fullBlocksPerGrid1d), dim3(threadsPerBlock1d), 0, 0, cudaalive, cudarays, cudaraysTemp, numRays);
ray* tempR = cudarays;
cudarays = cudaraysTemp;
cudaraysTemp = tempR;
hipLaunchKernelGGL(( resetAliveConditionArray), dim3(fullBlocksPerGrid1d), dim3(threadsPerBlock1d), 0, 0, cudaalive);
numRays = numAliveRaysCPU;
fullBlocksPerGrid1d = dim3((int)ceil((float(numRays)/float(tileSize*tileSize))));
}
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid2d), dim3(threadsPerBlock2d), 0, 0, PBOpos, renderCam->resolution, cudaimage, (float)iterations);
//retrieve image from GPU
hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);
//free up stuff, or else we'll leak memory like a madman
  hipFree( cudaimage );
  hipFree( cudageoms );
  hipFree( cudamaterials );
  hipFree( cudarays );
  hipFree(cudaalive);
  hipFree(cudatemp);
  hipFree(cudaraysTemp);
  delete [] geomList;
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
} | 875b1bff42104eb409288c8a8343590802429c08.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
using namespace glm;
#if CUDA_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
//vec3 jitter = 2.0f*generateRandomNumberFromThread(resolution, time, x, y);
vec3 jitter = vec3(0,0,0); //no antialiasing
float NDCx = ((float)x +jitter.x)/resolution.x;
float NDCy = ((float)y +jitter.y )/resolution.y;
//float NDCx = ((float)x )/resolution.x;
//float NDCy = ((float)y )/resolution.y;
vec3 A = cross(view, up);
vec3 B = cross(A, view);
vec3 M = eye+view;
vec3 V = B * (1.0f/length(B)) * length(view)*tan(radians(fov.y));
vec3 H = A * (1.0f/length(A)) * length(view)*tan(radians(fov.x));
vec3 point = M + (2*NDCx -1)*H + (1-2*NDCy)*V;
ray r;
r.origin = eye;
r.direction = normalize(point-eye);
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, float iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x/iterations*255.0;
color.y = image[index].y/iterations*255.0;
color.z = image[index].z/iterations*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
__global__ void raytraceRay(ray* cudarays, glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, int numberOfCubes, int numberOfSpheres, material* cudamaterials,
int numberOfMaterials, int numBounce, int* cudaalive, int initialMaxRays){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int y = cudarays[index].pixelID/resolution.y;
int x = cudarays[index].pixelID-resolution.x*y;
if (index < initialMaxRays){
float tempLength, closest = 1e26, indexOfRefraction = 0;
int closestObjectid = -1;
vec3 tempIntersectionPoint = vec3(0,0,0), tempNormal = vec3(0,0,0), normal = vec3(0,0,0), intersectionPoint = vec3(0,0,0);
vec3 objectColor = vec3(0,0,0), specColor = vec3(0,0,0);
    float specExponent = 0;
    bool isReflective = false, isRefractive = false;
bool inside = false, tempInside = false;
//input text file must load cubes first before loading spheres
for (int i = 0; i < numberOfCubes; i++){
if(geoms[i].type == CUBE){
tempLength = boxIntersectionTest( geoms[i], cudarays[index], tempIntersectionPoint, tempNormal, tempInside);
}
if (tempLength < closest && tempLength >= 0){
closest = tempLength;
normal = tempNormal;
intersectionPoint = tempIntersectionPoint;
closestObjectid = i;
inside = tempInside;
}
}
for(int i = numberOfCubes; i < numberOfGeoms; i++){
if(geoms[i].type == SPHERE){
tempLength = sphereIntersectionTest( geoms[i], cudarays[index], tempIntersectionPoint, tempNormal, tempInside);
}
if (tempLength < closest && tempLength >= 0){
closest = tempLength;
normal = tempNormal;
intersectionPoint = tempIntersectionPoint;
closestObjectid = i;
inside = tempInside;
}
}
if (closest < 1e26 && closest >= 0){
objectColor = cudamaterials[geoms[closestObjectid].materialid].color;
specExponent = cudamaterials[geoms[closestObjectid].materialid].specularExponent;
specColor = cudamaterials[geoms[closestObjectid].materialid].specularColor;
isReflective = cudamaterials[geoms[closestObjectid].materialid].hasReflective;
isRefractive = cudamaterials[geoms[closestObjectid].materialid].hasRefractive;
indexOfRefraction = cudamaterials[geoms[closestObjectid].materialid].indexOfRefraction;
vec3 reflectedDir = cudarays[index].direction - vec3(2*vec4(normal*(dot(cudarays[index].direction,normal)),0));
reflectedDir = normalize(reflectedDir);
vec3 refractedDir = vec3(0,0,0);
if (cudamaterials[geoms[closestObjectid].materialid].emittance > 0){
cudarays[index].color *= cudamaterials[geoms[closestObjectid].materialid].color*cudamaterials[geoms[closestObjectid].materialid].emittance;
cudarays[index].alive = false;
colors[cudarays[index].pixelID] += cudarays[index].color;
cudaalive[index] = 0; //dead
return;
}
float n1 = 0, n2 = 0;
float costheta_i = 0; float costheta_t = 0;
float sin2theta_t = 0;
float R = 0;
bool TIR = false;
float schlicksR = 0;
float random = 0;
if (isRefractive){
//graphics.stanford.edu/courses/cs148-10-summer/docs/2006--degreve--reflection_refraction.pdf
if (inside){
n1 = indexOfRefraction;
n2 = 1.0f;
normal = -normal;
}else{
n1 = 1.0f;
n2 = indexOfRefraction;
}
costheta_i = glm::dot(-1.0f*cudarays[index].direction, normal);
sin2theta_t = pow(n1/n2,2)*(1-pow(costheta_i,2));
R = pow((n1-n2)/(n1+n2),2);
if (sin2theta_t > 1){
TIR = true;
}else{
costheta_t = sqrt(1-sin2theta_t);
refractedDir = (n1/n2)*cudarays[index].direction + ((n1/n2)*costheta_i - sqrt(1-sin2theta_t))*normal;
}
if (n1 <= n2){
schlicksR = R + (1-R)*(1-costheta_i)*(1-costheta_i)*(1-costheta_i)*(1-costheta_i)*(1-costheta_i);
}else if (n1 > n2 && !TIR){
schlicksR = R + (1-R)*(1-costheta_t)*(1-costheta_t)*(1-costheta_t)*(1-costheta_t)*(1-costheta_t);
}else{
schlicksR = 1;
}
thrust::default_random_engine rng(hash((cudarays[index].pixelID)*time));
thrust::uniform_real_distribution<float> u01(0,1);
random = (float) u01(rng);
cudarays[index].origin = intersectionPoint+0.01f*refractedDir;
cudarays[index].direction = refractedDir;
if (random <= schlicksR){
cudarays[index].origin = intersectionPoint+0.0001f*reflectedDir;
cudarays[index].direction = reflectedDir;
}
}else if (isReflective){
cudarays[index].origin = intersectionPoint+0.01f*reflectedDir;
cudarays[index].direction = reflectedDir;
}else{ //just diffuse
//vec3 rand = generateRandomNumberFromThread(resolution, time*(numBounce+1), x, y);
thrust::default_random_engine rng1(hash(time*(numBounce + 1)* index));
thrust::uniform_real_distribution<float> u02(0,time);
thrust::default_random_engine rng(hash((float)u02(rng1)*(numBounce + 1)* index));
thrust::uniform_real_distribution<float> u01(0,1);
if((float)u01(rng) < 0.1) // russian roulette rule: ray is absorbed
{
cudarays[index].color *= cudamaterials[geoms[closestObjectid].materialid].color*cudamaterials[geoms[closestObjectid].materialid].emittance;
cudarays[index].alive = false;
colors[cudarays[index].pixelID] += cudarays[index].color;
cudaalive[index] = 0; //dead
return;
}
else
{
vec3 outgoingDir = calculateRandomDirectionInHemisphere(normal, (float)u01(rng), (float)u01(rng));
cudarays[index].origin = intersectionPoint+0.01f*outgoingDir;
cudarays[index].direction = outgoingDir;
}
/*vec3 outgoingDir = calculateRandomDirectionInHemisphere(normal, (float)u01(rng), (float)u01(rng));
cudarays[index].origin = intersectionPoint+0.001f*outgoingDir;
cudarays[index].direction = outgoingDir;*/
}
cudarays[index].color *= objectColor;
}//if intersects with anything
else{
cudarays[index].color *= vec3(0,0,0);
cudarays[index].alive = false;
colors[cudarays[index].pixelID] += cudarays[index].color;
cudaalive[index] = 0; //dead
//numAliveRays[0]--;
return;
}
}//end of ifstatement
}
//INITIALIZES A POOL OF RAYS
__global__ void initializeRays(glm::vec2 resolution, float time, cameraData cam, ray* cudarays){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if((x<resolution.x && y<resolution.y)){ //strictly less-than: x == resolution.x or y == resolution.y would index past the ray pool
ray rayFromCamera = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
//find aim point
vec3 aimPoint = rayFromCamera.origin + cam.focalLength*rayFromCamera.direction;
//jittered ray (DOF)
float degOfJitter = 1;
vec3 jitter = generateRandomNumberFromThread(resolution, time, x, y);
ray jitteredRay;
jitteredRay.origin = vec3(rayFromCamera.origin.x+degOfJitter*jitter.x, rayFromCamera.origin.y+degOfJitter*jitter.y, rayFromCamera.origin.z);
jitteredRay.direction = normalize(aimPoint-jitteredRay.origin);
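//NOTE: the depth-of-field (jittered) ray is computed above but not used below; the un-jittered camera ray is traced instead.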
ray currentRay = rayFromCamera; //jitteredRay;
currentRay.pixelID = index;
currentRay.color = vec3(1,1,1);
currentRay.alive = true;
cudarays[index] = currentRay; //stores ray
}
}
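//One step of a naive Hillis-Steele inclusive scan over the alive flags: for distance e = 2^(d-1) each
//element becomes the sum of itself and the element e positions to its left. The host calls this once per
//d (ping-ponging cudaalive/cudatemp) to build the prefix sums used for stream compaction.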
__global__ void scan(int* cudacondition, int* cudatemp, int d){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index == 0)
cudatemp[0] = cudacondition[0];
int e = 1 << (d-1); //2^(d-1) as an integer shift instead of float pow
if (index >= e){
cudatemp[index] = cudacondition[index-e] + cudacondition[index];
}else{
cudatemp[index] = cudacondition[index];
}
}
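//Scatter step of stream compaction: each ray that is still alive writes itself to position
//(inclusive prefix sum - 1) in the temporary ray array, packing the survivors contiguously.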
__global__ void streamCompact( int* cudaalive, ray* cudarays, ray* cudaraysTemp, int numRays){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < numRays){
if(cudarays[index].alive){ //compare to see if ray is alive or dead
cudaraysTemp[cudaalive[index]-1] = cudarays[index];
}
}
}
__global__ void resetAliveConditionArray( int* cudaalive){
int index = blockIdx.x * blockDim.x + threadIdx.x;
cudaalive[index] = 1;
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, int numberOfCubes, int numberOfSpheres, bool cameraMoved){
int traceDepth = 200; //determines how many bounces the pathtracer traces
std::vector<int> lightsid;
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock2d(tileSize, tileSize);
dim3 fullBlocksPerGrid2d((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
dim3 threadsPerBlock1d(tileSize*tileSize);
float s = renderCam->resolution.x*renderCam->resolution.y;
dim3 fullBlocksPerGrid1d((int)ceil((float(renderCam->resolution.x)/float(tileSize))*(float(renderCam->resolution.y)/float(tileSize))));
//send image to GPU
glm::vec3* cudaimage = NULL;
cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
//package geometry and materials and sent to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
material* cudamaterials = NULL;
cudaMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
cudaMemcpy( cudamaterials, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);
int numberOfPixels = renderCam->resolution.x*renderCam->resolution.y;
ray* cudarays = NULL;
cudaMalloc((void**)&cudarays, numberOfPixels*sizeof(ray));
//package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
cam.focalLength = renderCam->focalLengths[frame];
//clear image
if (cameraMoved)
clearImage<<<fullBlocksPerGrid2d, threadsPerBlock2d>>>(renderCam->resolution,cudaimage);
if (numberOfGeoms != numberOfCubes+numberOfSpheres){
std::cout<<"ERROR numberOfGeoms != numberOfCubes+numberOfSpheres"<<std::endl;
std::cout<<numberOfGeoms<<", "<<numberOfCubes<<", "<<numberOfSpheres<<std::endl;
}
//initial pool of rays
initializeRays<<<fullBlocksPerGrid2d, threadsPerBlock2d>>>(renderCam->resolution, (float)iterations, cam, cudarays);
//intialize the alive array
int* cudaalive = NULL;
cudaMalloc((void**)&cudaalive, numberOfPixels*sizeof(int));
resetAliveConditionArray<<<fullBlocksPerGrid1d, threadsPerBlock1d>>>( cudaalive);
int* cudatemp = NULL;
cudaMalloc((void**)&cudatemp, numberOfPixels*sizeof(int));
ray* cudaraysTemp = NULL;
cudaMalloc((void**)&cudaraysTemp, numberOfPixels*sizeof(ray));
int numRays = renderCam->resolution.x*renderCam->resolution.y;
//kernel launches
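//Per-bounce loop: trace one bounce for every live ray, scan the alive flags into prefix sums,
//read back the surviving-ray count, scatter the survivors into cudaraysTemp, swap the ray buffers,
//reset the flags, and shrink the 1D grid to match the new ray count.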
for (int i = 0; i < traceDepth && numRays > 0; i++){
raytraceRay<<<fullBlocksPerGrid1d, threadsPerBlock1d>>>(cudarays, renderCam->resolution, (float)iterations, cam, traceDepth,
cudaimage, cudageoms, numberOfGeoms, numberOfCubes, numberOfSpheres, cudamaterials, numberOfMaterials, i, cudaalive, numRays);
int log2n = (int)ceil(log(float(numRays)) / log(2.0f));
for (int d = 1; d <= log2n; d++){
scan<<<fullBlocksPerGrid1d, threadsPerBlock1d>>>( cudaalive, cudatemp, d); //scan
int* temp = cudaalive;
cudaalive = cudatemp;
cudatemp = temp;
}
int numAliveRaysCPU = 0;
//cudaalive now has the summed corresponding new indices for the alive rays
cudaMemcpy(&numAliveRaysCPU, &cudaalive[numRays-1], sizeof(int), cudaMemcpyDeviceToHost);
streamCompact<<<fullBlocksPerGrid1d, threadsPerBlock1d>>>( cudaalive, cudarays, cudaraysTemp, numRays);
ray* tempR = cudarays;
cudarays = cudaraysTemp;
cudaraysTemp = tempR;
resetAliveConditionArray<<<fullBlocksPerGrid1d, threadsPerBlock1d>>>( cudaalive);
numRays = numAliveRaysCPU;
fullBlocksPerGrid1d = dim3((int)ceil((float(numRays)/float(tileSize*tileSize))));
}
sendImageToPBO<<<fullBlocksPerGrid2d, threadsPerBlock2d>>>(PBOpos, renderCam->resolution, cudaimage, (float)iterations);
//retrieve image from GPU
cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
//free up stuff, or else we'll leak memory like a madman
cudaFree( cudaimage );
cudaFree( cudageoms );
cudaFree( cudarays );
cudaFree(cudaalive);
cudaFree(cudatemp);
cudaFree(cudaraysTemp);
delete [] geomList; //array delete to match new staticGeom[numberOfGeoms]
// make certain the kernel has completed
cudaThreadSynchronize();
checkCUDAError("Kernel failed!");
} |
63f090489f177c7b019d510ec2aea78e8e00213a.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019 XGBoost contributors
*/
#include <memory>
#include <utility>
#include "../common/hist_util.h"
#include "ellpack_page.cuh"
#include "ellpack_page_source.h"
#include "sparse_page_source.h"
namespace xgboost {
namespace data {
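// Row stride = the widest row in the input (for dense data this is simply the number of columns).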
size_t GetRowStride(DMatrix* dmat) {
if (dmat->IsDense()) return dmat->Info().num_col_;
size_t row_stride = 0;
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
const auto& row_offset = batch.offset.ConstHostVector();
for (auto i = 1ull; i < row_offset.size(); i++) {
row_stride = ::max(
row_stride, static_cast<size_t>(row_offset[i] - row_offset[i - 1]));
}
}
return row_stride;
}
// Build the quantile sketch across the whole input data, then use the histogram cuts to compress
// each CSR page, and write the accumulated ELLPACK pages to disk.
EllpackPageSource::EllpackPageSource(DMatrix* dmat,
const std::string& cache_info,
const BatchParam& param) noexcept(false) {
cache_info_ = ParseCacheInfo(cache_info, kPageType_);
for (auto file : cache_info_.name_shards) {
CheckCacheFileExists(file);
}
if (param.gpu_page_size > 0) {
page_size_ = param.gpu_page_size;
}
monitor_.Init("ellpack_page_source");
dh::safe_cuda(hipSetDevice(param.gpu_id));
monitor_.StartCuda("Quantiles");
size_t row_stride = GetRowStride(dmat);
auto cuts = common::DeviceSketch(param.gpu_id, dmat, param.max_bin,
param.gpu_batch_nrows);
monitor_.StopCuda("Quantiles");
monitor_.StartCuda("WriteEllpackPages");
WriteEllpackPages(param.gpu_id, dmat, cuts, cache_info, row_stride);
monitor_.StopCuda("WriteEllpackPages");
external_prefetcher_.reset(
new ExternalMemoryPrefetcher<EllpackPage>(cache_info_));
}
// Compress each CSR page to ELLPACK, and write the accumulated pages to disk.
void EllpackPageSource::WriteEllpackPages(int device, DMatrix* dmat,
const common::HistogramCuts& cuts,
const std::string& cache_info,
size_t row_stride) const {
auto cinfo = ParseCacheInfo(cache_info, kPageType_);
const size_t extra_buffer_capacity = 6;
SparsePageWriter<EllpackPage> writer(cinfo.name_shards, cinfo.format_shards,
extra_buffer_capacity);
std::shared_ptr<EllpackPage> page;
SparsePage temp_host_page;
writer.Alloc(&page);
auto* impl = page->Impl();
size_t bytes_write = 0;
double tstart = dmlc::GetTime();
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
temp_host_page.Push(batch);
size_t mem_cost_bytes =
EllpackPageImpl::MemCostBytes(temp_host_page.Size(), row_stride, cuts);
if (mem_cost_bytes >= page_size_) {
bytes_write += mem_cost_bytes;
*impl = EllpackPageImpl(device, cuts, temp_host_page, dmat->IsDense(),
row_stride);
writer.PushWrite(std::move(page));
writer.Alloc(&page);
impl = page->Impl();
temp_host_page.Clear();
double tdiff = dmlc::GetTime() - tstart;
LOG(INFO) << "Writing " << kPageType_ << " to " << cache_info << " in "
<< ((bytes_write >> 20UL) / tdiff) << " MB/s, "
<< (bytes_write >> 20UL) << " written";
}
}
if (temp_host_page.Size() != 0) {
*impl = EllpackPageImpl(device, cuts, temp_host_page, dmat->IsDense(),
row_stride);
writer.PushWrite(std::move(page));
}
}
} // namespace data
} // namespace xgboost
| 63f090489f177c7b019d510ec2aea78e8e00213a.cu | /*!
* Copyright 2019 XGBoost contributors
*/
#include <memory>
#include <utility>
#include "../common/hist_util.h"
#include "ellpack_page.cuh"
#include "ellpack_page_source.h"
#include "sparse_page_source.h"
namespace xgboost {
namespace data {
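// Row stride = the widest row in the input (for dense data this is simply the number of columns).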
size_t GetRowStride(DMatrix* dmat) {
if (dmat->IsDense()) return dmat->Info().num_col_;
size_t row_stride = 0;
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
const auto& row_offset = batch.offset.ConstHostVector();
for (auto i = 1ull; i < row_offset.size(); i++) {
row_stride = std::max(
row_stride, static_cast<size_t>(row_offset[i] - row_offset[i - 1]));
}
}
return row_stride;
}
// Build the quantile sketch across the whole input data, then use the histogram cuts to compress
// each CSR page, and write the accumulated ELLPACK pages to disk.
EllpackPageSource::EllpackPageSource(DMatrix* dmat,
const std::string& cache_info,
const BatchParam& param) noexcept(false) {
cache_info_ = ParseCacheInfo(cache_info, kPageType_);
for (auto file : cache_info_.name_shards) {
CheckCacheFileExists(file);
}
if (param.gpu_page_size > 0) {
page_size_ = param.gpu_page_size;
}
monitor_.Init("ellpack_page_source");
dh::safe_cuda(cudaSetDevice(param.gpu_id));
monitor_.StartCuda("Quantiles");
size_t row_stride = GetRowStride(dmat);
auto cuts = common::DeviceSketch(param.gpu_id, dmat, param.max_bin,
param.gpu_batch_nrows);
monitor_.StopCuda("Quantiles");
monitor_.StartCuda("WriteEllpackPages");
WriteEllpackPages(param.gpu_id, dmat, cuts, cache_info, row_stride);
monitor_.StopCuda("WriteEllpackPages");
external_prefetcher_.reset(
new ExternalMemoryPrefetcher<EllpackPage>(cache_info_));
}
// Compress each CSR page to ELLPACK, and write the accumulated pages to disk.
void EllpackPageSource::WriteEllpackPages(int device, DMatrix* dmat,
const common::HistogramCuts& cuts,
const std::string& cache_info,
size_t row_stride) const {
auto cinfo = ParseCacheInfo(cache_info, kPageType_);
const size_t extra_buffer_capacity = 6;
SparsePageWriter<EllpackPage> writer(cinfo.name_shards, cinfo.format_shards,
extra_buffer_capacity);
std::shared_ptr<EllpackPage> page;
SparsePage temp_host_page;
writer.Alloc(&page);
auto* impl = page->Impl();
size_t bytes_write = 0;
double tstart = dmlc::GetTime();
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
temp_host_page.Push(batch);
size_t mem_cost_bytes =
EllpackPageImpl::MemCostBytes(temp_host_page.Size(), row_stride, cuts);
if (mem_cost_bytes >= page_size_) {
bytes_write += mem_cost_bytes;
*impl = EllpackPageImpl(device, cuts, temp_host_page, dmat->IsDense(),
row_stride);
writer.PushWrite(std::move(page));
writer.Alloc(&page);
impl = page->Impl();
temp_host_page.Clear();
double tdiff = dmlc::GetTime() - tstart;
LOG(INFO) << "Writing " << kPageType_ << " to " << cache_info << " in "
<< ((bytes_write >> 20UL) / tdiff) << " MB/s, "
<< (bytes_write >> 20UL) << " written";
}
}
if (temp_host_page.Size() != 0) {
*impl = EllpackPageImpl(device, cuts, temp_host_page, dmat->IsDense(),
row_stride);
writer.PushWrite(std::move(page));
}
}
} // namespace data
} // namespace xgboost
|
07bc909b555d50d6b825954206b0381994eb2b51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
#include <stdio.h>
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C){
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size,
hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
// Read C from device memory
hipMemcpy(C.elements, d_C.elements, size,
hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e]
* B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
int main(int argc, char* argv[]){
Matrix A, B, C;
int a1, a2, b1, b2;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Read some values from the commandline
a1 = atoi(argv[1]); /* Height of A */
a2 = atoi(argv[2]); /* Width of A */
b1 = a2; /* Height of B */
b2 = atoi(argv[3]); /* Width of B */
A.height = a1;
A.width = a2;
A.elements = (float*)malloc(A.width * A.height * sizeof(float));
B.height = b1;
B.width = b2;
B.elements = (float*)malloc(B.width * B.height * sizeof(float));
C.height = A.height;
C.width = B.width;
C.elements = (float*)malloc(C.width * C.height * sizeof(float));
for(int i = 0; i < A.height; i++)
for(int j = 0; j < A.width; j++)
A.elements[i*A.width + j] = (float)(rand() % 3);
for(int i = 0; i < B.height; i++)
for(int j = 0; j < B.width; j++)
B.elements[i*B.width + j] = (float)(rand() % 2);
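// The events below time the whole MatMul() call: device allocation, copies to the device, the kernel, the copy back and cleanup.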
hipEventRecord(start);
MatMul(A, B, C);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("%d x %d Bloque:%d -- time: %f\n",a1,a2,BLOCK_SIZE,milliseconds);
hipEventDestroy(start);
hipEventDestroy(stop);
} | 07bc909b555d50d6b825954206b0381994eb2b51.cu | // Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
#include <stdio.h>
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C){
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e]
* B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
int main(int argc, char* argv[]){
Matrix A, B, C;
int a1, a2, b1, b2;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Read some values from the commandline
a1 = atoi(argv[1]); /* Height of A */
a2 = atoi(argv[2]); /* Width of A */
b1 = a2; /* Height of B */
b2 = atoi(argv[3]); /* Width of B */
A.height = a1;
A.width = a2;
A.elements = (float*)malloc(A.width * A.height * sizeof(float));
B.height = b1;
B.width = b2;
B.elements = (float*)malloc(B.width * B.height * sizeof(float));
C.height = A.height;
C.width = B.width;
C.elements = (float*)malloc(C.width * C.height * sizeof(float));
for(int i = 0; i < A.height; i++)
for(int j = 0; j < A.width; j++)
A.elements[i*A.width + j] = (float)(rand() % 3);
for(int i = 0; i < B.height; i++)
for(int j = 0; j < B.width; j++)
B.elements[i*B.width + j] = (float)(rand() % 2);
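// The events below time the whole MatMul() call: device allocation, copies to the device, the kernel, the copy back and cleanup.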
cudaEventRecord(start);
MatMul(A, B, C);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%d x %d Bloque:%d -- time: %f\n",a1,a2,BLOCK_SIZE,milliseconds);
cudaEventDestroy(start);
cudaEventDestroy(stop);
} |
3c5ca7eca18f5203571c459ca35c474af4a2fab2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <hip/hip_runtime.h>
#define NUM_CYCLES 250
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x*blockDim.x+threadIdx.x; // handle the data at this index
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int deviceCount, device;
int blocks,threads,n;
double time_s;
long start_time,end_time;
struct hipDeviceProp_t properties;
int *a, *b, *c;
struct timeval start,stop;
int *dev_a, *dev_b, *dev_c;
hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount);
if (cudaResultCode != hipSuccess)
deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties(&properties, device);
if (properties.major != 9999) /* 9999 means emulation only */
if (device==0)
{
printf("multiProcessorCount %d\n",properties.multiProcessorCount);
printf("maxThreadsPerMultiProcessor %d\n",properties.maxThreadsPerMultiProcessor);
blocks=properties.multiProcessorCount;
threads=properties.maxThreadsPerMultiProcessor;
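//NOTE: maxThreadsPerMultiProcessor (typically 1536-2048) is larger than the per-block limit
//(maxThreadsPerBlock, typically 1024), so using it as the block size can make the launch fail on most devices.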
n=properties.multiProcessorCount * properties.maxThreadsPerMultiProcessor;
}
}
a=(int*)malloc(n * sizeof(int));
b=(int*)malloc(n * sizeof(int));
c=(int*)malloc(n * sizeof(int));
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, n * sizeof(int) );
hipMalloc( (void**)&dev_b, n * sizeof(int) );
hipMalloc( (void**)&dev_c, n * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
hipMemcpy( dev_a, a, n * sizeof(int),hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, n * sizeof(int),hipMemcpyHostToDevice );
gettimeofday(&start,NULL);
int l;
start_time=start.tv_sec*1000000 + start.tv_usec;//get start time
for(l=0;l<NUM_CYCLES;l++)
hipLaunchKernelGGL(( add), dim3(blocks),dim3(threads), 0, 0, dev_a, dev_b, dev_c );
hipDeviceSynchronize(); //kernel launches are asynchronous; wait for completion so the wall-clock timing below is meaningful
gettimeofday(&stop,NULL);
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, n * sizeof(int),hipMemcpyDeviceToHost );
// display the results
// for (int i=0; i<N; i++) {
// printf( "%d + %d = %d\n", a[i], b[i], c[i] );
// }
// free the memory allocated on the GPU
time_s=end_time-start_time;
printf("Time taken: %lf",time_s);
printf("GFLOPS: %lf",(double)(NUM_CYCLES*n*3)/(time_s*1000000000));
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
}
| 3c5ca7eca18f5203571c459ca35c474af4a2fab2.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <cuda.h>
#define NUM_CYCLES 250
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x*blockDim.x+threadIdx.x; // handle the data at this index
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int deviceCount, device;
int blocks,threads,n;
double time_s;
long start_time,end_time;
struct cudaDeviceProp properties;
int *a, *b, *c;
struct timeval start,stop;
int *dev_a, *dev_b, *dev_c;
cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount);
if (cudaResultCode != cudaSuccess)
deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties(&properties, device);
if (properties.major != 9999) /* 9999 means emulation only */
if (device==0)
{
printf("multiProcessorCount %d\n",properties.multiProcessorCount);
printf("maxThreadsPerMultiProcessor %d\n",properties.maxThreadsPerMultiProcessor);
blocks=properties.multiProcessorCount;
threads=properties.maxThreadsPerMultiProcessor;
n=properties.multiProcessorCount * properties.maxThreadsPerMultiProcessor;
}
}
a=(int*)malloc(n * sizeof(int));
b=(int*)malloc(n * sizeof(int));
c=(int*)malloc(n * sizeof(int));
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, n * sizeof(int) );
cudaMalloc( (void**)&dev_b, n * sizeof(int) );
cudaMalloc( (void**)&dev_c, n * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy( dev_a, a, n * sizeof(int),cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, n * sizeof(int),cudaMemcpyHostToDevice );
gettimeofday(&start,NULL);
int l;
start_time=start.tv_sec*1000000 + start.tv_usec;//get start time
for(l=0;l<NUM_CYCLES;l++)
add<<<blocks,threads>>>( dev_a, dev_b, dev_c );
cudaDeviceSynchronize(); //kernel launches are asynchronous; wait for completion so the wall-clock timing below is meaningful
gettimeofday(&stop,NULL);
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, n * sizeof(int),cudaMemcpyDeviceToHost );
// display the results
// for (int i=0; i<N; i++) {
// printf( "%d + %d = %d\n", a[i], b[i], c[i] );
// }
// free the memory allocated on the GPU
time_s=end_time-start_time;
printf("Time taken: %lf",time_s);
printf("GFLOPS: %lf",(double)(NUM_CYCLES*n*3)/(time_s*1000000000));
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
|
b5fb73c346e7ba61d4c17056b66d0488b7df801b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kAddRowVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
hipMalloc(&mat, XSIZE*YSIZE);
float *vec = NULL;
hipMalloc(&vec, XSIZE*YSIZE);
float *tgtMat = NULL;
hipMalloc(&tgtMat, XSIZE*YSIZE);
unsigned int width = 1;
unsigned int height = 1;
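// NOTE: width and height stay fixed at 1, so (assuming kAddRowVector only touches width*height elements)
// each launch processes a single element; XSIZE/YSIZE only drive the allocation and grid sizes here.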
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kAddRowVector), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, vec, tgtMat, width, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kAddRowVector), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, vec, tgtMat, width, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kAddRowVector), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, vec, tgtMat, width, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b5fb73c346e7ba61d4c17056b66d0488b7df801b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kAddRowVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
cudaMalloc(&mat, XSIZE*YSIZE);
float *vec = NULL;
cudaMalloc(&vec, XSIZE*YSIZE);
float *tgtMat = NULL;
cudaMalloc(&tgtMat, XSIZE*YSIZE);
unsigned int width = 1;
unsigned int height = 1;
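// NOTE: width and height stay fixed at 1, so (assuming kAddRowVector only touches width*height elements)
// each launch processes a single element; XSIZE/YSIZE only drive the allocation and grid sizes here.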
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kAddRowVector<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kAddRowVector<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kAddRowVector<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
33e3f1c66bd7324e8d5b04ca421ee89ff45cacce.hip | // !!! This is a file automatically generated by hipify!!!
#include "knn_gpgpu.h"
#include <stdio.h>
#include <helper_cuda.h>
void writePoints(char *file_path, int n, struct Point *points)
{
printf("writing points...\n");
FILE *file = fopen(file_path, "w");
if (file == NULL)
{
fputs ("File error\n", stderr);
exit (1);
}
for (int i = 0; i < n; ++i)
{
fwrite(&points[i].p, sizeof(float), 3, file);
}
fclose(file);
}
void readPoints(const char *file_path, int n, struct Point *points)
{
printf("Reading points...\n");
FILE *file = fopen(file_path, "rb");
if (file == NULL)
{
fputs ("File error\n", stderr);
exit (1);
}
for (int i = 0; i < n; ++i)
{
fread(&points[i].p, sizeof(float), 3, file);
}
fclose(file);
}
void populatePoints(struct Point *points, int n)
{
srand((int)time(NULL));
for (int i = 0; i < n; ++i)
{
struct Point t;
t.p[0] = (float) rand();
t.p[1] = (float) rand();
t.p[2] = (float) rand();
points[i] = t;
}
}
int main(int argc, char const *argv[])
{
int n, nu, ni = 1024,
step = 250000,
k = 1,
no_of_runs = 1000;
bool from_file = 0,
variable_k = 0;
n = nu = ni;
if (argc == 2)
{
nu = ni = atoi(argv[1]);
printf("Running kd-search-all with n = %d, k = %d\n", nu, k);
}
else if (argc == 3)
{
nu = ni = atoi(argv[1]);
from_file = 1;
printf("Running kd-search-all from file '%s' with n = %d\n", argv[2], nu);
}
else if (argc == 4)
{
nu = atoi(argv[1]);
ni = atoi(argv[2]);
step = atoi(argv[3]);
printf("Running kd-search-all from n = %d to n = %d with step = %d\n", nu, ni, step);
}
else if (argc == 5)
{
nu = atoi(argv[1]);
ni = atoi(argv[2]);
step = atoi(argv[3]);
k = atoi(argv[4]);
variable_k = 1;
printf("Running kd-search-%d from n = %d to n = %d with step = %d, k = %d\n", no_of_runs, nu, ni, step, k);
}
else
{
printf("Running kd-search-all with n = %d\n", nu);
}
for (n = nu; n <= ni ; n += step)
{
struct Node *tree = (struct Node *) malloc(n * sizeof(Node));
struct Point *points = (struct Point *) malloc(n * sizeof(Point));
int *result = (int *) malloc(n * k * sizeof(int));
if (from_file)
{
readPoints(argv[2], n, points);
}
else
{
populatePoints(points, n);
printf("*Alok - Finished Reading Points\n");
}
hipEvent_t start, stop;
float elapsed_time_build = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, 0));
buildKdTree(points, n, tree);
printf("*Alok - Finished Building Tree\n");
checkCudaErrors(hipEventRecord(stop, 0));
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_build, start, stop);
hipDeviceReset();
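// hipDeviceReset() tears down the current context (including the events created above),
// so fresh events are created below for the query-phase timing.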
float elapsed_time_search = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, 0));
if (variable_k)
{
cuQueryAll(points, tree, no_of_runs, n, k, result);
}
else
{
printf("*Alok - Querying all...\n");
cuQueryAll(points, tree, n, n, k, result);
printf("*Alok - Querying all done\n");
}
checkCudaErrors(hipEventRecord(stop, 0));
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_search, start, stop);
printf("kd-search-all, Build Time = %.5f ms, Query Time = %.5f ms, Total time = %.5f ms, Size = %u Elements, NumDevsUsed = %d\n",
elapsed_time_build, elapsed_time_search, elapsed_time_build + elapsed_time_search, n, 1);
free(points);
free(result);
free(tree);
hipDeviceReset();
}
return 0;
}
| 33e3f1c66bd7324e8d5b04ca421ee89ff45cacce.cu | #include "knn_gpgpu.h"
#include <stdio.h>
#include <helper_cuda.h>
void writePoints(char *file_path, int n, struct Point *points)
{
printf("writing points...\n");
FILE *file = fopen(file_path, "w");
if (file == NULL)
{
fputs ("File error\n", stderr);
exit (1);
}
for (int i = 0; i < n; ++i)
{
fwrite(&points[i].p, sizeof(float), 3, file);
}
fclose(file);
}
void readPoints(const char *file_path, int n, struct Point *points)
{
printf("Reading points...\n");
FILE *file = fopen(file_path, "rb");
if (file == NULL)
{
fputs ("File error\n", stderr);
exit (1);
}
for (int i = 0; i < n; ++i)
{
fread(&points[i].p, sizeof(float), 3, file);
}
fclose(file);
}
void populatePoints(struct Point *points, int n)
{
srand((int)time(NULL));
for (int i = 0; i < n; ++i)
{
struct Point t;
t.p[0] = (float) rand();
t.p[1] = (float) rand();
t.p[2] = (float) rand();
points[i] = t;
}
}
int main(int argc, char const *argv[])
{
int n, nu, ni = 1024,
step = 250000,
k = 1,
no_of_runs = 1000;
bool from_file = 0,
variable_k = 0;
n = nu = ni;
if (argc == 2)
{
nu = ni = atoi(argv[1]);
printf("Running kd-search-all with n = %d, k = %d\n", nu, k);
}
else if (argc == 3)
{
nu = ni = atoi(argv[1]);
from_file = 1;
printf("Running kd-search-all from file '%s' with n = %d\n", argv[2], nu);
}
else if (argc == 4)
{
nu = atoi(argv[1]);
ni = atoi(argv[2]);
step = atoi(argv[3]);
printf("Running kd-search-all from n = %d to n = %d with step = %d\n", nu, ni, step);
}
else if (argc == 5)
{
nu = atoi(argv[1]);
ni = atoi(argv[2]);
step = atoi(argv[3]);
k = atoi(argv[4]);
variable_k = 1;
printf("Running kd-search-%d from n = %d to n = %d with step = %d, k = %d\n", no_of_runs, nu, ni, step, k);
}
else
{
printf("Running kd-search-all with n = %d\n", nu);
}
for (n = nu; n <= ni ; n += step)
{
struct Node *tree = (struct Node *) malloc(n * sizeof(Node));
struct Point *points = (struct Point *) malloc(n * sizeof(Point));
int *result = (int *) malloc(n * k * sizeof(int));
if (from_file)
{
readPoints(argv[2], n, points);
}
else
{
populatePoints(points, n);
printf("*Alok - Finished Reading Points\n");
}
cudaEvent_t start, stop;
float elapsed_time_build = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, 0));
buildKdTree(points, n, tree);
printf("*Alok - Finished Building Tree\n");
checkCudaErrors(cudaEventRecord(stop, 0));
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_build, start, stop);
cudaDeviceReset();
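// cudaDeviceReset() tears down the current context (including the events created above),
// so fresh events are created below for the query-phase timing.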
float elapsed_time_search = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, 0));
if (variable_k)
{
cuQueryAll(points, tree, no_of_runs, n, k, result);
}
else
{
printf("*Alok - Querying all...\n");
cuQueryAll(points, tree, n, n, k, result);
printf("*Alok - Querying all done\n");
}
checkCudaErrors(cudaEventRecord(stop, 0));
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_search, start, stop);
printf("kd-search-all, Build Time = %.5f ms, Query Time = %.5f ms, Total time = %.5f ms, Size = %u Elements, NumDevsUsed = %d\n",
elapsed_time_build, elapsed_time_search, elapsed_time_build + elapsed_time_search, n, 1);
free(points);
free(result);
free(tree);
cudaDeviceReset();
}
return 0;
}
|
60896e1da5168675888a31113913e98136f77aae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CSI_utils.h"
#include <algorithm>
#include "cudaDeviceManager.h"
#include "complext.h"
#include <math_constants.h>
#include <stdio.h>
#include "cuNDArray_math.h"
#include "cuNDArray_fileio.h"
#include <numeric>
using namespace Gadgetron;
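// Direct (non-FFT) DFT along the echo dimension: one thread per (k-space sample, frequency) pair sums
// tspace[k,i] * exp(-i*2*pi*f*(i*dte + k*dtt)) over the echoes i.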
template<class T> static __global__ void dft_kernel(complext<T>* __restrict__ kspace, const complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*nfreqs ){
complext<T> result = 0;
T frequency = frequencies[idx/spiral_length];
T time_offset = dtt*(idx%spiral_length);
unsigned int kpoint = idx%spiral_length;
for (unsigned int i =0; i < echoes; i++){
result += exp(complext<T>(0,-frequency*2*CUDART_PI_F*(dte*i+time_offset)))*tspace[kpoint+i*spiral_length];
}
kspace[idx] = result;
}
}
template<class T> static __global__ void dftH_kernel(const complext<T>* __restrict__ kspace, complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*echoes ){
complext<T> result = 0;
unsigned int kpoint = idx%spiral_length;
T timeshift = dte*(idx/spiral_length)+dtt*kpoint;
for (unsigned int i =0; i < nfreqs; i++){
result += exp(complext<T>(0,frequencies[i]*2*CUDART_PI_F*timeshift))*kspace[kpoint+i*spiral_length];
}
tspace[idx] = result;
}
}
template<class T>
void Gadgetron::CSI_dft(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, cuNDArray<T>* frequencies, T dtt, T dte) {
size_t elements = kspace->get_size(0)*kspace->get_size(1);
size_t batches = kspace->get_number_of_elements()/elements;
size_t t_elements = tspace->get_size(0)*tspace->get_size(1);
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
std::vector<size_t> dims = *tspace->get_dimensions();
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
hipFuncSetCacheConfig(dft_kernel<T>,hipFuncCachePreferL1);
for (int i = 0; i< batches; i++){
//size_t batchSize = dimGrid.x*dimBlock.x;
// Invoke kernel
hipLaunchKernelGGL(( dft_kernel<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, kspace->get_data_ptr()+i*elements,tspace->get_data_ptr()+i*t_elements,frequencies->data(),dims[0],dims[1], frequencies->size(),dte,dtt);
CHECK_FOR_CUDA_ERROR();
hipDeviceSynchronize();
}
*kspace /= T(dims[1]);
}
template<class T>
void Gadgetron::CSI_dftH(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, cuNDArray<T>* frequencies, T dtt, T dte) {
size_t k_elements = kspace->get_size(0)*kspace->get_size(1);
size_t elements = tspace->get_size(0)*tspace->get_size(1);
size_t batches = tspace->get_number_of_elements()/elements;
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
//size_t batchSize = dimGrid.x*dimBlock.x;
hipFuncSetCacheConfig(dftH_kernel<T>,hipFuncCachePreferL1);
std::vector<size_t> dims = *tspace->get_dimensions();
for (int i =0; i< batches; i++){
// Invoke kernel
hipLaunchKernelGGL(( dftH_kernel<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, kspace->get_data_ptr()+i*k_elements,tspace->get_data_ptr()+i*elements,frequencies->data(),dims[0],dims[1], frequencies->size(),dte,dtt);
CHECK_FOR_CUDA_ERROR();
}
*tspace /= T(dims[1]);
}
template<class T>
boost::shared_ptr<cuNDArray<complext<T> > > Gadgetron::calculate_frequency_calibration(cuNDArray<complext<T> >* time_track, cuNDArray<T>* frequencies,cuNDArray<complext<T> > * csm,T dtt,T dte){
std::vector<size_t> out_dims;
out_dims.push_back(frequencies->size());
out_dims.push_back(1);
cuNDArray<complext<T> >* time2 = time_track;
if (csm){
std::vector<size_t> csm_dims = *csm->get_dimensions();
int coils = csm_dims.back();
csm_dims.pop_back();
std::vector<size_t> time_dims = *time_track->get_dimensions();
if (time_dims.back() != coils)
throw std::runtime_error("Number of coils in time data does not match number of coils in CSM");
time_dims.back() = time_dims.front();
time_dims.front() = 1;
time_dims.push_back(coils);
time2 = new cuNDArray<complext<T> >(time_dims, time_track->get_data_ptr());
out_dims.push_back(coils);
}
boost::shared_ptr<cuNDArray<complext<T > > > result(new cuNDArray<complext<T> >(out_dims));
clear(result.get());
CSI_dft(result.get(),time2,frequencies,float(0),dtt);
if (csm)
delete time2;
return result;
}
template<class T> static __global__ void mult_freq_kernel(complext<T>* in_out, complext<T>* freqs, bool conjugate){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (conjugate)
in_out[idx] *= conj(freqs[blockIdx.y]);
else
in_out[idx] *= freqs[blockIdx.y];
}
template< class T>
void Gadgetron::mult_freq(cuNDArray<complext<T> >* in_out, cuNDArray<complext<T> >* freqs, bool conjugate){
std::vector<size_t> dims = *in_out->get_dimensions();
if (dims.back() != freqs->get_number_of_elements()){
throw std::runtime_error("Input image dimensions do not match frequencies");
}
size_t elements = in_out->get_number_of_elements()/dims.back();
int threadsPerBlock = std::min<size_t>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid,dims.back());
hipLaunchKernelGGL(( mult_freq_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, in_out->get_data_ptr(),freqs->get_data_ptr(),conjugate);
}
template EXPORTHYPER void Gadgetron::CSI_dft<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, cuNDArray<float>* frequencies, float dtt, float dte);
template EXPORTHYPER void Gadgetron::CSI_dftH<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, cuNDArray<float>* frequencies, float dtt, float dte);
template EXPORTHYPER boost::shared_ptr<cuNDArray<float_complext> > Gadgetron::calculate_frequency_calibration<float>(cuNDArray<float_complext>* time_track, cuNDArray<float>* frequencies,cuNDArray<float_complext> * csm,float dtt,float dte);
template EXPORTHYPER void Gadgetron::mult_freq<float>(cuNDArray<complext<float> >* in_out, cuNDArray<complext<float> >* freqs, bool conjugate);
| 60896e1da5168675888a31113913e98136f77aae.cu | #include "CSI_utils.h"
#include <algorithm>
#include "cudaDeviceManager.h"
#include "complext.h"
#include <math_constants.h>
#include <stdio.h>
#include "cuNDArray_math.h"
#include "cuNDArray_fileio.h"
#include <numeric>
using namespace Gadgetron;
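// Direct (non-FFT) DFT along the echo dimension: one thread per (k-space sample, frequency) pair sums
// tspace[k,i] * exp(-i*2*pi*f*(i*dte + k*dtt)) over the echoes i.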
template<class T> static __global__ void dft_kernel(complext<T>* __restrict__ kspace, const complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*nfreqs ){
complext<T> result = 0;
T frequency = frequencies[idx/spiral_length];
T time_offset = dtt*(idx%spiral_length);
unsigned int kpoint = idx%spiral_length;
for (unsigned int i =0; i < echoes; i++){
result += exp(complext<T>(0,-frequency*2*CUDART_PI_F*(dte*i+time_offset)))*tspace[kpoint+i*spiral_length];
}
kspace[idx] = result;
}
}
template<class T> static __global__ void dftH_kernel(const complext<T>* __restrict__ kspace, complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*echoes ){
complext<T> result = 0;
unsigned int kpoint = idx%spiral_length;
T timeshift = dte*(idx/spiral_length)+dtt*kpoint;
for (unsigned int i =0; i < nfreqs; i++){
result += exp(complext<T>(0,frequencies[i]*2*CUDART_PI_F*timeshift))*kspace[kpoint+i*spiral_length];
}
tspace[idx] = result;
}
}
template<class T>
void Gadgetron::CSI_dft(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, cuNDArray<T>* frequencies, T dtt, T dte) {
size_t elements = kspace->get_size(0)*kspace->get_size(1);
size_t batches = kspace->get_number_of_elements()/elements;
size_t t_elements = tspace->get_size(0)*tspace->get_size(1);
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
std::vector<size_t> dims = *tspace->get_dimensions();
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
cudaFuncSetCacheConfig(dft_kernel<T>,cudaFuncCachePreferL1);
for (int i = 0; i< batches; i++){
//size_t batchSize = dimGrid.x*dimBlock.x;
// Invoke kernel
dft_kernel<T><<<dimGrid, dimBlock>>>(kspace->get_data_ptr()+i*elements,tspace->get_data_ptr()+i*t_elements,frequencies->data(),dims[0],dims[1], frequencies->size(),dte,dtt);
CHECK_FOR_CUDA_ERROR();
cudaThreadSynchronize();
}
*kspace /= T(dims[1]);
}
template<class T>
void Gadgetron::CSI_dftH(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, cuNDArray<T>* frequencies, T dtt, T dte) {
size_t k_elements = kspace->get_size(0)*kspace->get_size(1);
size_t elements = tspace->get_size(0)*tspace->get_size(1);
size_t batches = tspace->get_number_of_elements()/elements;
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
//size_t batchSize = dimGrid.x*dimBlock.x;
cudaFuncSetCacheConfig(dftH_kernel<T>,cudaFuncCachePreferL1);
std::vector<size_t> dims = *tspace->get_dimensions();
for (int i =0; i< batches; i++){
// Invoke kernel
dftH_kernel<T><<<dimGrid, dimBlock>>>(kspace->get_data_ptr()+i*k_elements,tspace->get_data_ptr()+i*elements,frequencies->data(),dims[0],dims[1], frequencies->size(),dte,dtt);
CHECK_FOR_CUDA_ERROR();
}
*tspace /= T(dims[1]);
}
template<class T>
boost::shared_ptr<cuNDArray<complext<T> > > Gadgetron::calculate_frequency_calibration(cuNDArray<complext<T> >* time_track, cuNDArray<T>* frequencies,cuNDArray<complext<T> > * csm,T dtt,T dte){
std::vector<size_t> out_dims;
out_dims.push_back(frequencies->size());
out_dims.push_back(1);
cuNDArray<complext<T> >* time2 = time_track;
if (csm){
std::vector<size_t> csm_dims = *csm->get_dimensions();
int coils = csm_dims.back();
csm_dims.pop_back();
std::vector<size_t> time_dims = *time_track->get_dimensions();
if (time_dims.back() != coils)
throw std::runtime_error("Number of coils in time data does not match number of coils in CSM");
time_dims.back() = time_dims.front();
time_dims.front() = 1;
time_dims.push_back(coils);
time2 = new cuNDArray<complext<T> >(time_dims, time_track->get_data_ptr());
out_dims.push_back(coils);
}
boost::shared_ptr<cuNDArray<complext<T > > > result(new cuNDArray<complext<T> >(out_dims));
clear(result.get());
CSI_dft(result.get(),time2,frequencies,float(0),dtt);
if (csm)
delete time2;
return result;
}
template<class T> static __global__ void mult_freq_kernel(complext<T>* in_out, complext<T>* freqs, bool conjugate){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (conjugate)
in_out[idx] *= conj(freqs[blockIdx.y]);
else
in_out[idx] *= freqs[blockIdx.y];
}
template< class T>
void Gadgetron::mult_freq(cuNDArray<complext<T> >* in_out, cuNDArray<complext<T> >* freqs, bool conjugate){
std::vector<size_t> dims = *in_out->get_dimensions();
if (dims.back() != freqs->get_number_of_elements()){
throw std::runtime_error("Input image dimensions do not match frequencies");
}
size_t elements = in_out->get_number_of_elements()/dims.back();
int threadsPerBlock = std::min<size_t>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid,dims.back());
mult_freq_kernel<<<dimGrid, dimBlock>>>(in_out->get_data_ptr(),freqs->get_data_ptr(),conjugate);
}
template EXPORTHYPER void Gadgetron::CSI_dft<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, cuNDArray<float>* frequencies, float dtt, float dte);
template EXPORTHYPER void Gadgetron::CSI_dftH<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, cuNDArray<float>* frequencies, float dtt, float dte);
template EXPORTHYPER boost::shared_ptr<cuNDArray<float_complext> > Gadgetron::calculate_frequency_calibration<float>(cuNDArray<float_complext>* time_track, cuNDArray<float>* frequencies,cuNDArray<float_complext> * csm,float dtt,float dte);
template EXPORTHYPER void Gadgetron::mult_freq<float>(cuNDArray<complext<float> >* in_out, cuNDArray<complext<float> >* freqs, bool conjugate);
|
a9eca8b3d5772a63f955f79e567a85947b1059d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHNumerics.cuh>
#include <THH/THH.h>
#include <hip/hip_runtime.h>
//#define DEBUG
// calculate the IoU of a single box against another box
__device__
float calc_single_iou(const float4 b1, const float4 b2) {
// (lt), (rb)
float l = max(b1.x, b2.x);
float t = max(b1.y, b2.y);
float r = min(b1.z, b2.z);
float b = min(b1.w, b2.w);
float first = (r - l);
first = (first < 0) ? 0 : first;
float second = (b - t);
second = (second < 0) ? 0 : second;
float intersection = first * second;
float area1 = (b1.w - b1.y) * (b1.z - b1.x);
float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
__global__
// boxes1 : [N x 4]
// boxes2 : [M x 4]
// ious : [N x M]
void calc_ious_kernel(const int N_img, const float4 *box1, const int *box1_offsets,
const int M, const float4 *boxes2, float *ious) {
// launch N_img blocks
const int img = blockIdx.x;
// each block, i will run over the box1_N[i] source and M target boxes
// generating box1_N[i] x M outputs
// alias to start of boxes for this image
const float4 *b1 = &box1[box1_offsets[img]];
if (threadIdx.x == 0) {
//printf("offset for img %d : %d\n", img, box1_offsets[img]);
}
// number of boxes for this image from offsets
int N = box1_offsets[img+1] - box1_offsets[img];
for (int i = 0; i < N; ++i) {
// if (threadIdx.x == 0) printf("i : %d\n", i);
const float4 source = b1[i];
// for each source, loop over targets
for (int j = threadIdx.x; j < M; j += blockDim.x) {
const float4 target = boxes2[j];
float iou = calc_single_iou(source, target);
// store the calculated IoU in the correct spot
int out_idx = box1_offsets[img] * M + i * M + j;
ious[out_idx] = iou;
}
}
}
__device__
void reduce_val_idx(int N, volatile float *vals, volatile int *idx) {
// naive: single thread for now
if (threadIdx.x == 0) {
float max_val = vals[0];
int max_idx = idx[0];
for (int i = 1; i < N; ++i) {
if (vals[i] > max_val) {
max_val = vals[i];
max_idx = idx[i];
}
}
vals[0] = max_val;
idx[0] = max_idx;
}
}
/**
* perform remaining parts, storing temporary values in global workspace
* workspace needs N_img * M values, each of 8 bytes (float, int)
**/
template <int BLOCK_SIZE, int MAX_BBOXES_PER_BLOCK>
__global__
void encode(const int N_img, const float4 *bbox_in, const long *labels_in, const int *offsets,
const int M, const float4 *dboxes, // const float *ious,
const float criteria, uint8_t *workspace, float4 *bbox_out, long *label_out) {
// Each block will take a single image's IoU set
const int img = blockIdx.x;
// shared memory for intermediate results
__shared__ volatile float best_bbox_iou_tmp[BLOCK_SIZE];
__shared__ volatile int best_bbox_idx_tmp[BLOCK_SIZE];
// shared memory for final best_bbox_{iou, idx} values
__shared__ volatile float best_bbox_iou[MAX_BBOXES_PER_BLOCK];
__shared__ volatile int best_bbox_idx[MAX_BBOXES_PER_BLOCK];
// index into the global workspace - each image needs (float + int) * M values
volatile float *best_dbox_iou = (float *)&workspace[img * M * 8];
volatile int *best_dbox_idx = (int *)&workspace[img * M * 8 + M * 4];
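// i.e. each image's slice of the workspace holds M floats (best_dbox_iou) immediately followed by M ints (best_dbox_idx)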
// number of input bboxes for this image
const int N_rows = offsets[img+1] - offsets[img];
// Check for potential crash
assert(N_rows <= MAX_BBOXES_PER_BLOCK);
#ifdef DEBUG
if (threadIdx.x == 0)
printf("N rows: %d %d to %d (%p - %p)\n", N_rows, offsets[img], offsets[img+1], best_dbox_iou, best_dbox_idx);
#endif
for (int i = threadIdx.x; i < MAX_BBOXES_PER_BLOCK; i += blockDim.x) {
best_bbox_iou[i] = -FLT_MAX;
best_bbox_idx[i] = -1;
}
__syncthreads();
// loop serially over the rows of the IoU set that correspond to this image
int row_num = 0;
for (int i = offsets[img]; i < offsets[img+1]; ++i) {
// reset shmem tallies
best_bbox_iou_tmp[threadIdx.x] = -FLT_MAX;
best_bbox_idx_tmp[threadIdx.x] = -1;
// index into the input buffer
// const float *row = &ious[i * M];
const float4 input_bbox = bbox_in[i];
#ifdef DEBUG
if (threadIdx.x == 0)
printf("%d - %p\n", img, &input_bbox);
#endif
// loop by threads over the columns
for (int j = threadIdx.x; j < M; j += blockDim.x) {
// check and store new max if necessary
const float4 input_dbox = dboxes[j];
// float new_val = row[j];
float new_val = calc_single_iou(input_bbox, input_dbox);
// handle per-row max in shared memory
if (new_val > best_bbox_iou_tmp[threadIdx.x]) {
best_bbox_iou_tmp[threadIdx.x] = new_val;
best_bbox_idx_tmp[threadIdx.x] = j;
}
// handle per-col max in global workspace
if (new_val > best_dbox_iou[j]) {
best_dbox_iou[j] = new_val;
best_dbox_idx[j] = row_num;
#ifdef DEBUG
assert(best_dbox_idx[j] >= 0);
assert(best_dbox_idx[j] < N_rows);
#endif
}
}
// Now we have all the values for this row -- reduce
__syncthreads();
// reduce - output is in max_{val, idx}_row[0]
reduce_val_idx(blockDim.x, best_bbox_iou_tmp, best_bbox_idx_tmp);
#ifdef DEBUG
__syncthreads();
#endif
// store output for row i
if (threadIdx.x == 0) {
best_bbox_iou[row_num] = best_bbox_iou_tmp[0];
best_bbox_idx[row_num] = best_bbox_idx_tmp[0];
#ifdef DEBUG
assert(best_bbox_idx[row_num] >= 0);
assert(best_bbox_idx[row_num] < M);
#endif
}
__syncthreads();
// keep track of _local_ row
row_num++;
}
#ifdef DEBUG
if (threadIdx.x == 0) {
for (int i = 0; i < N_rows; ++i) {
printf("%d - row : %d : best bbox_idx: %d\n", img, i, best_bbox_idx[i]);
}
}
#endif
#ifdef DEBUG
// make sure all best_bbox_{iou, val} are seen by everyone
__syncthreads();
#endif
// At this point we have the maximum values & indices for both bbox and dbox
/*
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
*/
for (int i = threadIdx.x; i < N_rows; i += blockDim.x) {
int idx = best_bbox_idx[i];
#ifdef DEBUG
assert(idx < M);
assert(idx >= 0);
#endif
best_dbox_iou[idx] = 2.;
best_dbox_idx[idx] = i;
#ifdef DEBUG
printf("%d - set best dbox_idx[%d] to %d\n", img, best_bbox_idx[i], i);
#endif
}
/**
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
__syncthreads();
for (int i = threadIdx.x; i < M; i += blockDim.x) {
// offset into output arrays: M values per image
// int output_idx = offsets[img] * M + i;
int output_idx = img * M + i;
// reset output labels to background
// NOTE: bbox_out is already cloned from dbox outside of this kernel
label_out[output_idx] = 0;
// Filter IoU > 0.5
bool mask = best_dbox_iou[i] > criteria;
float4 bbox = bbox_out[output_idx];
// copy some labels and bboxes
if (mask) {
// copy label
#ifdef DEBUG
printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + best_dbox_idx[i]]);
// printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + i]);
#endif
label_out[output_idx] = labels_in[offsets[img] + best_dbox_idx[i]];
// grab original box
bbox = bbox_in[offsets[img] + best_dbox_idx[i]];
#ifdef DEBUG
printf("mask %d : %d : %f %f %f %f\n", i, best_dbox_idx[i], bbox.x, bbox.y, bbox.z, bbox.w);
#endif
}
// transfer to xywh
float4 bbox_tmp;
bbox_tmp.x = 0.5 * (bbox.x + bbox.z);
bbox_tmp.y = 0.5 * (bbox.y + bbox.w);
bbox_tmp.z = bbox.z - bbox.x;
bbox_tmp.w = bbox.w - bbox.y;
// write out
bbox_out[output_idx] = bbox_tmp;
}
}
/**
def encode(self, bboxes_in, labels_in, criteria = 0.5):
ious = calc_iou_tensor(bboxes_in, self.dboxes)
best_dbox_ious, best_dbox_idx = ious.max(dim=0)
best_bbox_ious, best_bbox_idx = ious.max(dim=1)
# set best ious 2.0
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
std::vector<at::Tensor> box_encoder(const int N_img,
const at::Tensor& bbox_input,
const at::Tensor& bbox_offsets,
const at::Tensor& labels_input,
const at::Tensor& dbox,
float criteria) {
// Check everything is on the device
AT_ASSERTM(bbox_input.is_cuda(), "bboxes must be a CUDA tensor");
AT_ASSERTM(bbox_offsets.is_cuda(), "bbox offsets must be a CUDA tensor");
AT_ASSERTM(labels_input.is_cuda(), "labels must be a CUDA tensor");
AT_ASSERTM(dbox.is_cuda(), "dboxes must be a CUDA tensor");
// Check at least offsets, bboxes and labels are consistent
// Note: offsets is N+1 vs. N for labels
AT_ASSERTM(N_img + 1 == bbox_offsets.numel(), "must have N_img+1 offsets");
auto num_bbox_total = bbox_offsets[bbox_offsets.numel()-1].item<int>();
#ifdef DEBUG
printf("%d : bboxes: %d\n", (int)bbox_offsets.numel(), num_bbox_total);
#endif
AT_ASSERTM(num_bbox_total <= 2048, "total num bboxes must be <= 2048");
AT_ASSERTM(bbox_input.size(0) == labels_input.size(0), "bbox and labels must have same leading dimension");
const int N = bbox_input.size(0);
const int M = dbox.size(0);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// allocate final outputs (known size)
#ifdef DEBUG
printf("%d x %d\n", N_img * M, 4);
// at::Tensor bbox_out = dbox.scalar_type().tensor({N_img * M, 4});
printf("allocating %lu bytes for output labels\n", N_img*M*sizeof(long));
#endif
at::Tensor labels_out = at::empty({N_img * M}, labels_input.options());
THCudaCheck(hipGetLastError());
// copy default boxes to outputs
#ifdef DEBUG
printf("allocating %lu bytes for output bboxes\n", N_img*M*4*sizeof(float));
#endif
at::Tensor bbox_out = dbox.repeat({N_img, 1});
THCudaCheck(hipGetLastError());
// need to allocate some workspace
#ifdef DEBUG
printf("allocating %lu bytes for workspace\n", 8*M*N_img);
#endif
// at::Tensor workspace = at::CUDA(at::kByte).zeros({8 * M * N_img});
at::Tensor workspace = at::zeros({8 * M * N_img}, at::CUDA(at::kByte));
THCudaCheck(hipGetLastError());
// Encode the inputs
const int THREADS_PER_BLOCK = 256;
hipLaunchKernelGGL(( encode<THREADS_PER_BLOCK, 256>), dim3(N_img), dim3(THREADS_PER_BLOCK), 0, stream.stream(), N_img,
(float4*)bbox_input.data_ptr<float>(),
labels_input.data_ptr<long>(),
bbox_offsets.data_ptr<int>(),
M,
(float4*)dbox.data_ptr<float>(),
criteria,
workspace.data_ptr<uint8_t>(),
(float4*)bbox_out.data_ptr<float>(),
labels_out.data_ptr<long>());
THCudaCheck(hipGetLastError());
return {bbox_out, labels_out};
}
at::Tensor calc_ious(const int N_img,
const at::Tensor& boxes1,
const at::Tensor& boxes1_offsets,
const at::Tensor& boxes2) {
const int N = boxes1.size(0);
const int M = boxes2.size(0);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// at::Tensor ious = at::CUDA(at::kFloat).zeros({N, M});
// at::Tensor ious = at::ones(at::CUDA(at::kFloat), {N, M});
at::Tensor ious = at::empty({N, M}, boxes1.options());
// Get IoU of all source x default box pairs
hipLaunchKernelGGL(( calc_ious_kernel), dim3(N_img), dim3(256), 0, stream.stream(),
N_img,
(float4*)boxes1.data_ptr<float>(),
boxes1_offsets.data_ptr<int>(),
M,
(float4*)boxes2.data_ptr<float>(),
ious.data_ptr<float>());
THCudaCheck(hipGetLastError());
return ious;
}
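// Illustrative usage sketch (shapes and names below are assumptions, not taken
// from this file):
//   boxes1 : [N, 4] float CUDA tensor, ltrb boxes of all images concatenated
//   boxes1_offsets : [N_img + 1] int CUDA tensor of prefix sums into boxes1
//   boxes2 : [M, 4] float CUDA tensor (e.g. the default boxes)
//   at::Tensor ious = calc_ious(N_img, boxes1, boxes1_offsets, boxes2);
//   // ious has shape [N, M] with ious[i][j] = IoU(boxes1[i], boxes2[j])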
/**
* Each block handles one image: it flips that image's bboxes and mirrors
* all C channels of the image along the horizontal (width) axis
**/
template <typename T>
__global__
void HorizFlipImagesAndBoxes(
const int N,
const int C,
const int H,
const int W,
const T* img_in,
float* bboxes,
const int* offsets,
const float p,
const float* flip,
T* img_out,
const bool nhwc) {
// early return if not flipping
if (flip[blockIdx.x] < p) return;
// pointer offset into images
const int img_offset = blockIdx.x * C * H * W;
const T* img = &img_in[img_offset];
T* img_o = &img_out[img_offset];
// flip bboxes
auto bbox_offset_begin = offsets[blockIdx.x];
auto bbox_offset_end = offsets[blockIdx.x + 1];
auto num_bboxes = bbox_offset_end - bbox_offset_begin;
const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x;
// bboxes in ltrb format, scaled to [0, 1]
for (int i = thread_idx; i < num_bboxes; i += blockDim.x * blockDim.y) {
float *bbox = &bboxes[(bbox_offset_begin + i) * 4];
// Could do this inplace, but not register constrained
auto bbox_0 = bbox[0];
auto bbox_2 = bbox[2];
bbox[0] = 1. - bbox_2;
bbox[2] = 1. - bbox_0;
}
if (nhwc) {
// loop over pixels; each thread copies all C channel values of its (h, w) position
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const T* img_hw = &img[h * W * C + w * C];
T * img_out_hw = &img_o[h * W * C + (W - 1 - w) * C];
for (int c = 0; c < C; ++c) {
img_out_hw[c] = img_hw[c];
}
}
}
} else {
// loop over channels
for (int c = 0; c < C; ++c) {
const T* img_c = &img[c * H * W];
T *img_out_c = &img_o[c * H * W];
// handle tiles of (h, w) at a time
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const int input_idx = h * W + w;
const int output_idx = h * W + (W - 1 - w);
img_out_c[output_idx] = img_c[input_idx];
}
}
}
}
}
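// Worked example of the bbox mirroring above (values are illustrative):
// a box with l = 0.2 and r = 0.5 in normalized ltrb becomes
// l' = 1 - 0.5 = 0.5 and r' = 1 - 0.2 = 0.8; t and b are unchanged.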
/**
* Take images and their bboxes, randomly flip on horizontal axis
* In: img: NCHW (or NHWC if nhwc is set) tensor of N C-channel images of constant (H, W)
* In/Out: bboxes: [sum_i N_i, 4] tensor of original bboxes in ltrb format, flipped in place
* In: bbox_offsets: [N+1] exclusive prefix sums giving each image's range in bboxes
* In: p \in [0, 1): probability of flipping each (img, bbox) pair
* In: nhwc: true if the image tensor is in NHWC format
* ----
* Note: allocates a temp image copy and returns {flipped_img, bboxes} instead of writing img back
*/
std::vector<at::Tensor> random_horiz_flip(
at::Tensor& img,
at::Tensor& bboxes,
const at::Tensor& bbox_offsets,
const float p,
const bool nhwc) {
// dimensions
const int N = img.size(0);
int C, H, W;
if (nhwc) {
C = img.size(3);
H = img.size(1);
W = img.size(2);
} else {
C = img.size(1);
H = img.size(2);
W = img.size(3);
}
assert(img.is_cuda());
assert(bboxes.is_cuda());
assert(bbox_offsets.is_cuda());
// printf("%d %d %d %d\n", N, C, H, W);
// Need temp storage of size img
at::Tensor tmp_img = img.clone();
at::Tensor flip = at::zeros({N}, at::CUDA(at::kFloat)).uniform_(0., 1.);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
img.scalar_type(),
"HorizFlipImagesAndBoxes",
[&] {
hipLaunchKernelGGL(( HorizFlipImagesAndBoxes<scalar_t>), dim3(N), dim3(16, 16), 0, stream.stream(),
N,
C,
H,
W,
img.data_ptr<scalar_t>(),
bboxes.data_ptr<float>(),
bbox_offsets.data_ptr<int>(),
p,
flip.data_ptr<float>(),
tmp_img.data_ptr<scalar_t>(),
nhwc);
THCudaCheck(hipGetLastError());
});
// copy tmp_img -> img
// img = tmp_img;
return {tmp_img, bboxes};
}
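// Illustrative sketch of how these entry points might be exposed to Python via
// a torch extension; the module and exported names below are assumptions, not
// taken from this file.
//
// #include <torch/extension.h>
// PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//   m.def("box_encoder", &box_encoder, "SSD box/label encoder (CUDA)");
//   m.def("calc_ious", &calc_ious, "pairwise IoU (CUDA)");
//   m.def("random_horiz_flip", &random_horiz_flip, "random horizontal flip (CUDA)");
// }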
| a9eca8b3d5772a63f955f79e567a85947b1059d5.cu |
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCNumerics.cuh>
#include <THC/THC.h>
#include <cuda.h>
//#define DEBUG
// calculate the IoU of a single box against another box
__device__
float calc_single_iou(const float4 b1, const float4 b2) {
// (lt), (rb)
float l = max(b1.x, b2.x);
float t = max(b1.y, b2.y);
float r = min(b1.z, b2.z);
float b = min(b1.w, b2.w);
float first = (r - l);
first = (first < 0) ? 0 : first;
float second = (b - t);
second = (second < 0) ? 0 : second;
float intersection = first * second;
float area1 = (b1.w - b1.y) * (b1.z - b1.x);
float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
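// Worked example for calc_single_iou (values are illustrative): with
// b1 = (0, 0, 2, 2) and b2 = (1, 1, 3, 3) the intersection rectangle is
// (1, 1, 2, 2) with area 1, each box has area 4, so
// IoU = 1 / (4 + 4 - 1) = 1/7 ~ 0.143.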
__global__
// boxes1 : [N x 4]
// boxes2 : [M x 4]
// ious : [N x M]
void calc_ious_kernel(const int N_img, const float4 *box1, const int *box1_offsets,
const int M, const float4 *boxes2, float *ious) {
// launch N_img blocks
const int img = blockIdx.x;
// each block, i will run over the box1_N[i] source and M target boxes
// generating box1_N[i] x M outputs
// alias to start of boxes for this image
const float4 *b1 = &box1[box1_offsets[img]];
if (threadIdx.x == 0) {
//printf("offset for img %d : %d\n", img, box1_offsets[img]);
}
// number of boxes for this image from offsets
int N = box1_offsets[img+1] - box1_offsets[img];
for (int i = 0; i < N; ++i) {
// if (threadIdx.x == 0) printf("i : %d\n", i);
const float4 source = b1[i];
// for each source, loop over targets
for (int j = threadIdx.x; j < M; j += blockDim.x) {
const float4 target = boxes2[j];
float iou = calc_single_iou(source, target);
// store the calculated IoU in the correct spot
int out_idx = box1_offsets[img] * M + i * M + j;
ious[out_idx] = iou;
}
}
}
__device__
void reduce_val_idx(int N, volatile float *vals, volatile int *idx) {
// naive: single thread for now
if (threadIdx.x == 0) {
float max_val = vals[0];
int max_idx = idx[0];
for (int i = 1; i < N; ++i) {
if (vals[i] > max_val) {
max_val = vals[i];
max_idx = idx[i];
}
}
vals[0] = max_val;
idx[0] = max_idx;
}
}
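// Illustrative alternative (a sketch added for clarity, not used by the kernels
// below): a shared-memory tree reduction with the same contract. It assumes N
// is a power of two and that every thread of the block calls it.
__device__
void reduce_val_idx_tree(int N, volatile float *vals, volatile int *idx) {
for (int stride = N / 2; stride > 0; stride >>= 1) {
if (threadIdx.x < stride && vals[threadIdx.x + stride] > vals[threadIdx.x]) {
vals[threadIdx.x] = vals[threadIdx.x + stride];
idx[threadIdx.x] = idx[threadIdx.x + stride];
}
__syncthreads();
}
// as with reduce_val_idx, the result is left in vals[0] / idx[0]
}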
/**
* perform remaining parts, storing temporary values in global workspace
* workspace needs N_img * M values, each of 8 bytes (float, int)
**/
template <int BLOCK_SIZE, int MAX_BBOXES_PER_BLOCK>
__global__
void encode(const int N_img, const float4 *bbox_in, const long *labels_in, const int *offsets,
const int M, const float4 *dboxes, // const float *ious,
const float criteria, uint8_t *workspace, float4 *bbox_out, long *label_out) {
// Each block will take a single image's IoU set
const int img = blockIdx.x;
// shared memory for intermediate results
__shared__ volatile float best_bbox_iou_tmp[BLOCK_SIZE];
__shared__ volatile int best_bbox_idx_tmp[BLOCK_SIZE];
// shared memory for final best_bbox_{iou, idx} values
__shared__ volatile float best_bbox_iou[MAX_BBOXES_PER_BLOCK];
__shared__ volatile int best_bbox_idx[MAX_BBOXES_PER_BLOCK];
// index into the global workspace - each image needs (float + int) * M values
volatile float *best_dbox_iou = (float *)&workspace[img * M * 8];
volatile int *best_dbox_idx = (int *)&workspace[img * M * 8 + M * 4];
// number of input bboxes for this image
const int N_rows = offsets[img+1] - offsets[img];
// Check for potential crash
assert(N_rows <= MAX_BBOXES_PER_BLOCK);
#ifdef DEBUG
if (threadIdx.x == 0)
printf("N rows: %d %d to %d (%p - %p)\n", N_rows, offsets[img], offsets[img+1], best_dbox_iou, best_dbox_idx);
#endif
for (int i = threadIdx.x; i < MAX_BBOXES_PER_BLOCK; i += blockDim.x) {
best_bbox_iou[i] = -FLT_MAX;
best_bbox_idx[i] = -1;
}
__syncthreads();
// loop serially over the rows of the IoU set that correspond to this image
int row_num = 0;
for (int i = offsets[img]; i < offsets[img+1]; ++i) {
// reset shmem tallies
best_bbox_iou_tmp[threadIdx.x] = -FLT_MAX;
best_bbox_idx_tmp[threadIdx.x] = -1;
// index into the input buffer
// const float *row = &ious[i * M];
const float4 input_bbox = bbox_in[i];
#ifdef DEBUG
if (threadIdx.x == 0)
printf("%d - %p\n", img, &input_bbox);
#endif
// loop by threads over the columns
for (int j = threadIdx.x; j < M; j += blockDim.x) {
// check and store new max if necessary
const float4 input_dbox = dboxes[j];
// float new_val = row[j];
float new_val = calc_single_iou(input_bbox, input_dbox);
// handle per-row max in shared memory
if (new_val > best_bbox_iou_tmp[threadIdx.x]) {
best_bbox_iou_tmp[threadIdx.x] = new_val;
best_bbox_idx_tmp[threadIdx.x] = j;
}
// handle per-col max in global workspace
if (new_val > best_dbox_iou[j]) {
best_dbox_iou[j] = new_val;
best_dbox_idx[j] = row_num;
#ifdef DEBUG
assert(best_dbox_idx[j] >= 0);
assert(best_dbox_idx[j] < N_rows);
#endif
}
}
// Now we have all the values for this row -- reduce
__syncthreads();
// reduce - output is in max_{val, idx}_row[0]
reduce_val_idx(blockDim.x, best_bbox_iou_tmp, best_bbox_idx_tmp);
#ifdef DEBUG
__syncthreads();
#endif
// store output for row i
if (threadIdx.x == 0) {
best_bbox_iou[row_num] = best_bbox_iou_tmp[0];
best_bbox_idx[row_num] = best_bbox_idx_tmp[0];
#ifdef DEBUG
assert(best_bbox_idx[row_num] >= 0);
assert(best_bbox_idx[row_num] < M);
#endif
}
__syncthreads();
// keep track of _local_ row
row_num++;
}
#ifdef DEBUG
if (threadIdx.x == 0) {
for (int i = 0; i < N_rows; ++i) {
printf("%d - row : %d : best bbox_idx: %d\n", img, i, best_bbox_idx[i]);
}
}
#endif
#ifdef DEBUG
// make sure all best_bbox_{iou, val} are seen by everyone
__syncthreads();
#endif
// At this point we have the maximum values & indices for both bbox and dbox
/*
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
*/
for (int i = threadIdx.x; i < N_rows; i += blockDim.x) {
int idx = best_bbox_idx[i];
#ifdef DEBUG
assert(idx < M);
assert(idx >= 0);
#endif
best_dbox_iou[idx] = 2.;
best_dbox_idx[idx] = i;
#ifdef DEBUG
printf("%d - set best dbox_idx[%d] to %d\n", img, best_bbox_idx[i], i);
#endif
}
/**
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
__syncthreads();
for (int i = threadIdx.x; i < M; i += blockDim.x) {
// offset into output arrays: M values per image
// int output_idx = offsets[img] * M + i;
int output_idx = img * M + i;
// reset output labels to background
// NOTE: bbox_out is already cloned from dbox outside of this kernel
label_out[output_idx] = 0;
// Filter IoU > 0.5
bool mask = best_dbox_iou[i] > criteria;
float4 bbox = bbox_out[output_idx];
// copy some labels and bboxes
if (mask) {
// copy label
#ifdef DEBUG
printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + best_dbox_idx[i]]);
// printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + i]);
#endif
label_out[output_idx] = labels_in[offsets[img] + best_dbox_idx[i]];
// grab original box
bbox = bbox_in[offsets[img] + best_dbox_idx[i]];
#ifdef DEBUG
printf("mask %d : %d : %f %f %f %f\n", i, best_dbox_idx[i], bbox.x, bbox.y, bbox.z, bbox.w);
#endif
}
// transfer to xywh
float4 bbox_tmp;
bbox_tmp.x = 0.5 * (bbox.x + bbox.z);
bbox_tmp.y = 0.5 * (bbox.y + bbox.w);
bbox_tmp.z = bbox.z - bbox.x;
bbox_tmp.w = bbox.w - bbox.y;
// write out
bbox_out[output_idx] = bbox_tmp;
}
}
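// Layout note (added for clarity): for image img the per-image workspace
// region used above is
//   workspace[img*M*8 ... img*M*8 + 4*M)        -> best_dbox_iou (float[M])
//   workspace[img*M*8 + 4*M ... (img+1)*M*8)    -> best_dbox_idx (int[M])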
/**
def encode(self, bboxes_in, labels_in, criteria = 0.5):
ious = calc_iou_tensor(bboxes_in, self.dboxes)
best_dbox_ious, best_dbox_idx = ious.max(dim=0)
best_bbox_ious, best_bbox_idx = ious.max(dim=1)
# set best ious 2.0
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
std::vector<at::Tensor> box_encoder(const int N_img,
const at::Tensor& bbox_input,
const at::Tensor& bbox_offsets,
const at::Tensor& labels_input,
const at::Tensor& dbox,
float criteria) {
// Check everything is on the device
AT_ASSERTM(bbox_input.is_cuda(), "bboxes must be a CUDA tensor");
AT_ASSERTM(bbox_offsets.is_cuda(), "bbox offsets must be a CUDA tensor");
AT_ASSERTM(labels_input.is_cuda(), "labels must be a CUDA tensor");
AT_ASSERTM(dbox.is_cuda(), "dboxes must be a CUDA tensor");
// Check at least offsets, bboxes and labels are consistent
// Note: offsets is N+1 vs. N for labels
AT_ASSERTM(N_img + 1 == bbox_offsets.numel(), "must have N_img+1 offsets");
auto num_bbox_total = bbox_offsets[bbox_offsets.numel()-1].item<int>();
#ifdef DEBUG
printf("%d : bboxes: %d\n", (int)bbox_offsets.numel(), num_bbox_total);
#endif
AT_ASSERTM(num_bbox_total <= 2048, "total num bboxes must be <= 2048");
AT_ASSERTM(bbox_input.size(0) == labels_input.size(0), "bbox and labels must have same leading dimension");
const int N = bbox_input.size(0);
const int M = dbox.size(0);
auto stream = at::cuda::getCurrentCUDAStream();
// allocate final outputs (known size)
#ifdef DEBUG
printf("%d x %d\n", N_img * M, 4);
// at::Tensor bbox_out = dbox.scalar_type().tensor({N_img * M, 4});
printf("allocating %lu bytes for output labels\n", N_img*M*sizeof(long));
#endif
at::Tensor labels_out = at::empty({N_img * M}, labels_input.options());
THCudaCheck(cudaGetLastError());
// copy default boxes to outputs
#ifdef DEBUG
printf("allocating %lu bytes for output bboxes\n", N_img*M*4*sizeof(float));
#endif
at::Tensor bbox_out = dbox.repeat({N_img, 1});
THCudaCheck(cudaGetLastError());
// need to allocate some workspace
#ifdef DEBUG
printf("allocating %lu bytes for workspace\n", 8*M*N_img);
#endif
// at::Tensor workspace = at::CUDA(at::kByte).zeros({8 * M * N_img});
at::Tensor workspace = at::zeros({8 * M * N_img}, at::CUDA(at::kByte));
THCudaCheck(cudaGetLastError());
// Encode the inputs
const int THREADS_PER_BLOCK = 256;
encode<THREADS_PER_BLOCK, 256><<<N_img, THREADS_PER_BLOCK, 0, stream.stream()>>>(N_img,
(float4*)bbox_input.data_ptr<float>(),
labels_input.data_ptr<long>(),
bbox_offsets.data_ptr<int>(),
M,
(float4*)dbox.data_ptr<float>(),
criteria,
workspace.data_ptr<uint8_t>(),
(float4*)bbox_out.data_ptr<float>(),
labels_out.data_ptr<long>());
THCudaCheck(cudaGetLastError());
return {bbox_out, labels_out};
}
at::Tensor calc_ious(const int N_img,
const at::Tensor& boxes1,
const at::Tensor& boxes1_offsets,
const at::Tensor& boxes2) {
const int N = boxes1.size(0);
const int M = boxes2.size(0);
auto stream = at::cuda::getCurrentCUDAStream();
// at::Tensor ious = at::CUDA(at::kFloat).zeros({N, M});
// at::Tensor ious = at::ones(at::CUDA(at::kFloat), {N, M});
at::Tensor ious = at::empty({N, M}, boxes1.options());
// Get IoU of all source x default box pairs
calc_ious_kernel<<<N_img, 256, 0, stream.stream()>>>(
N_img,
(float4*)boxes1.data_ptr<float>(),
boxes1_offsets.data_ptr<int>(),
M,
(float4*)boxes2.data_ptr<float>(),
ious.data_ptr<float>());
THCudaCheck(cudaGetLastError());
return ious;
}
/**
* Each block handles one image: it flips that image's bboxes and mirrors
* all C channels of the image along the horizontal (width) axis
**/
template <typename T>
__global__
void HorizFlipImagesAndBoxes(
const int N,
const int C,
const int H,
const int W,
const T* img_in,
float* bboxes,
const int* offsets,
const float p,
const float* flip,
T* img_out,
const bool nhwc) {
// early return if not flipping
if (flip[blockIdx.x] < p) return;
// pointer offset into images
const int img_offset = blockIdx.x * C * H * W;
const T* img = &img_in[img_offset];
T* img_o = &img_out[img_offset];
// flip bboxes
auto bbox_offset_begin = offsets[blockIdx.x];
auto bbox_offset_end = offsets[blockIdx.x + 1];
auto num_bboxes = bbox_offset_end - bbox_offset_begin;
const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x;
// bboxes in ltrb format, scaled to [0, 1]
for (int i = thread_idx; i < num_bboxes; i += blockDim.x * blockDim.y) {
float *bbox = &bboxes[(bbox_offset_begin + i) * 4];
// Could do this inplace, but not register constrained
auto bbox_0 = bbox[0];
auto bbox_2 = bbox[2];
bbox[0] = 1. - bbox_2;
bbox[2] = 1. - bbox_0;
}
if (nhwc) {
// loop over pixels; each thread copies all C channel values of its (h, w) position
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const T* img_hw = &img[h * W * C + w * C];
T * img_out_hw = &img_o[h * W * C + (W - 1 - w) * C];
for (int c = 0; c < C; ++c) {
img_out_hw[c] = img_hw[c];
}
}
}
} else {
// loop over channels
for (int c = 0; c < C; ++c) {
const T* img_c = &img[c * H * W];
T *img_out_c = &img_o[c * H * W];
// handle tiles of (h, w) at a time
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const int input_idx = h * W + w;
const int output_idx = h * W + (W - 1 - w);
img_out_c[output_idx] = img_c[input_idx];
}
}
}
}
}
/**
* Take images and their bboxes, randomly flip on horizontal axis
* In: img: NCHW (or NHWC if nhwc is set) tensor of N C-channel images of constant (H, W)
* In/Out: bboxes: [sum_i N_i, 4] tensor of original bboxes in ltrb format, flipped in place
* In: bbox_offsets: [N+1] exclusive prefix sums giving each image's range in bboxes
* In: p \in [0, 1): probability of flipping each (img, bbox) pair
* In: nhwc: true if the image tensor is in NHWC format
* ----
* Note: allocates a temp image copy and returns {flipped_img, bboxes} instead of writing img back
*/
std::vector<at::Tensor> random_horiz_flip(
at::Tensor& img,
at::Tensor& bboxes,
const at::Tensor& bbox_offsets,
const float p,
const bool nhwc) {
// dimensions
const int N = img.size(0);
int C, H, W;
if (nhwc) {
C = img.size(3);
H = img.size(1);
W = img.size(2);
} else {
C = img.size(1);
H = img.size(2);
W = img.size(3);
}
assert(img.is_cuda());
assert(bboxes.is_cuda());
assert(bbox_offsets.is_cuda());
// printf("%d %d %d %d\n", N, C, H, W);
// Need temp storage of size img
at::Tensor tmp_img = img.clone();
at::Tensor flip = at::zeros({N}, at::CUDA(at::kFloat)).uniform_(0., 1.);
auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
img.scalar_type(),
"HorizFlipImagesAndBoxes",
[&] {
HorizFlipImagesAndBoxes<scalar_t><<<N, dim3(16, 16), 0, stream.stream()>>>(
N,
C,
H,
W,
img.data_ptr<scalar_t>(),
bboxes.data_ptr<float>(),
bbox_offsets.data_ptr<int>(),
p,
flip.data_ptr<float>(),
tmp_img.data_ptr<scalar_t>(),
nhwc);
THCudaCheck(cudaGetLastError());
});
// copy tmp_img -> img
// img = tmp_img;
return {tmp_img, bboxes};
}
|
b70722f1a8ccfc92569d53c0fa6a219ebe8a22fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "TransposeKernelShared.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint8_t *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
uint8_t *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((TransposeKernelShared), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, width, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((TransposeKernelShared), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, width, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((TransposeKernelShared), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, width, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b70722f1a8ccfc92569d53c0fa6a219ebe8a22fe.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "TransposeKernelShared.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint8_t *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
uint8_t *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
TransposeKernelShared<<<gridBlock,threadBlock>>>(src,dst,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
TransposeKernelShared<<<gridBlock,threadBlock>>>(src,dst,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
TransposeKernelShared<<<gridBlock,threadBlock>>>(src,dst,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6b0bddf22c3f919f4b213f57668b96c0498a62cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Author: rodrigo
* 2015
*/
#include "CudaUtil.h"
#include <ostream>
#include <iostream>
#define BLOCKS 20
#define THREADS 256
// Pointer to memory in device
struct Point;
Point *devPoints = NULL;
bool *devNotUsed = NULL;
struct BallCenter
{
float cx, cy, cz;
int idx1, idx2, idx3;
bool isValid;
__device__ BallCenter(const int _idx1, const int _idx2, const int _idx3)
{
idx1 = _idx1;
idx2 = _idx2;
idx3 = _idx3;
cx = cy = cz = 0;
isValid = false;
}
};
struct Point
{
float x, y, z, w;
float nx, ny, nz, nw;
float c;
float fill[3];
__device__ Point operator-(const Point &_p)
{
Point result;
result.x = x - _p.x;
result.y = y - _p.y;
result.z = z - _p.z;
return result;
}
__device__ double sqrDist(const Point &_p) const
{
double dx = x - _p.x;
double dy = y - _p.y;
double dz = z - _p.z;
return dx * dx + dy * dy + dz * dz;
}
__device__ double dist(const Point &_p) const
{
double dx = x - _p.x;
double dy = y - _p.y;
double dz = z - _p.z;
return sqrt(dx * dx + dy * dy + dz * dz);
}
};
std::ostream &operator<<(std::ostream &_stream, const BallCenter &_center)
{
_stream << "c=(" << _center.cx << ", " << _center.cy << ", " << _center.cz << ") / (" << _center.idx1 << ", " << _center.idx2 << ", " << _center.idx3 << ")";
return _stream;
}
void CudaUtil::allocPoints(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud)
{
size_t cloudBytes = sizeof(pcl::PointNormal) * _cloud->size();
hipMalloc((void **) &devPoints, cloudBytes);
cudaCheckErrors("hipMalloc points failed");
hipMemcpy(devPoints, &_cloud->points[0], cloudBytes, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy points to dev failed");
}
void CudaUtil::allocUsed(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud, const bool* _notUsed)
{
size_t bytes = sizeof(bool) * _cloud->size();
hipMalloc((void **) &devNotUsed, bytes);
cudaCheckErrors("hipMalloc notUsed failed");
hipMemset(devNotUsed, 0, bytes);
cudaCheckErrors("hipMemset notUsed failed");
}
__global__ void searchCloserPoints(const int _target, const Point *_points, const int _pointNumber, const double _searchRadius, const int _pointsPerThread, bool *_selected)
{
int startIdx = (blockIdx.x * blockDim.x + threadIdx.x) * _pointsPerThread;
double sqrRadius = _searchRadius * _searchRadius;
for (int i = startIdx; i < startIdx + _pointsPerThread && i < _pointNumber; i++)
{
_selected[i] = _points[_target].sqrDist(_points[i]) < sqrRadius;
}
}
bool CudaUtil::radiusSearch(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud, const int _target, double _radius, std::vector<int> &_idxs)
{
int blocks = 10;
int threads = 256;
size_t cloudSize = _cloud->size();
// Copy points to device
if (devPoints == NULL)
allocPoints(_cloud);
// Array to store points within radius
bool *devSelected;
hipMalloc((void **) &devSelected, sizeof(bool) * cloudSize);
cudaCheckErrors("hipMalloc selected failed");
// Calculate adequate number of blocks and threads
while (cloudSize / blocks < 2)
blocks /= 2;
int pointsPerBlock = ceil((double) cloudSize / blocks);
while (pointsPerBlock / threads < 1)
threads /= 2;
int pointsPerThread = ceil((double) pointsPerBlock / threads);
// Execute kernel
hipLaunchKernelGGL(( searchCloserPoints), dim3(blocks), dim3(threads), 0, 0, _target, devPoints, cloudSize, _radius, pointsPerThread, devSelected);
// Copy data to host
bool *selected = (bool *) calloc(cloudSize, sizeof(bool));
hipMemcpy(selected, devSelected, sizeof(bool) * cloudSize, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy selected failed");
//hipFree(devSelected);
//cudaCheckErrors("hipFree selected failed");
for (size_t i = 0; i < cloudSize; i++)
if (selected[i])
_idxs.push_back(i);
free(selected);
return true;
}
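// Illustrative call of radiusSearch (the cloud contents, the radius value and
// the assumption that it is a static member are not taken from this file):
//   pcl::PointCloud<pcl::PointNormal>::Ptr cloud(new pcl::PointCloud<pcl::PointNormal>());
//   // ... fill cloud with oriented points ...
//   std::vector<int> neighborIdxs;
//   CudaUtil::radiusSearch(cloud, /*_target=*/0, /*_radius=*/0.05, neighborIdxs);
//   // neighborIdxs now holds indices of points within 0.05 of point 0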
///////////////////////////////
__device__ bool getBallCenter(const Point *_p1, const Point *_p2, const Point *_p3, BallCenter *_center)
{
return false;
}
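// Note (added sketch, not the author's implementation): getBallCenter is a stub
// here. In ball pivoting the center of a ball of radius r touching p1, p2, p3
// is normally obtained from the triangle circumcenter c_t and the unit triangle
// normal n as
//   center = c_t +/- sqrt(r*r - |c_t - p1|^2) * n
// and only exists when the circumradius |c_t - p1| does not exceed r.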
__global__ void checkForSeeds(const Point *_points, const int _pointNumber, const int *_neighbors, const int _neighborsSize, const bool *_notUsed, const int _index0)
{
int startIdx = 0; //(blockIdx.x * blockDim.x + threadIdx.x) * _pointsPerThread;
int endIdx = 0; // compute this
__shared__ bool found;
found = false;
//__syncthreads();
for (int j = startIdx; j < endIdx && j < _neighborsSize; j++)
{
if (!found)
{
int index1 = _neighbors[j];
// Skip invalid combinations
if (index1 == _index0 || !_notUsed[index1])
continue;
for (size_t k = 0; k < _neighborsSize && !found; k++)
{
int index2 = _neighbors[k];
// Skip invalid combinations
if (index1 == index2 || index2 == _index0 || !_notUsed[index2])
continue;
BallCenter center(_index0, index1, index2);
if (!found && getBallCenter(&_points[_index0], &_points[index1], &_points[index2], &center))
{
// pcl::PointNormal ballCenter = Helper::makePointNormal(center);
// std::vector<int> neighborhood = getNeighbors(ballCenter, ballRadius);
// if (!found && isEmpty(neighborhood, index0, index1, index2, center))
// {
// if (!found)
// {
//
// THIS HAS TO BE DONE
// INSIDE A MUTEX-PROTECTED
// BLOCK!
//
// seed = TrianglePtr(new Triangle(cloud->at((int) sequence[0]), cloud->at((int) sequence[1]), cloud->at((int) sequence[2]), sequence[0], sequence[1], sequence[2], ballCenter, ballRadius));
// devNotUsed.erase(index0);
// devNotUsed.erase(index1);
// devNotUsed.erase(index2);
//
// found = true;
//
// break;
// }
// }
}
}
}
}
}
bool CudaUtil::findSeed(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud, const std::vector<int> &_neighbors, const bool *_notUsed, const int _index0)
{
int blocks = 10;
int threads = 256;
size_t cloudSize = _cloud->size();
// Prepare memory buffers
if (devPoints == NULL)
allocPoints(_cloud);
if (devNotUsed == NULL)
allocUsed(_cloud, _notUsed);
// Copy not used data to dev
size_t notUsedBytes = sizeof(bool) * _cloud->size();
hipMemcpy(devNotUsed, _notUsed, notUsedBytes, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy notUsed to dev failed");
// Create and prepare buffer with neighbors indices
int *devNeighbors;
size_t neighborsBytes = sizeof(int) * _neighbors.size();
hipMalloc((void **) &devNeighbors, neighborsBytes);
cudaCheckErrors("hipMalloc neighbors failed");
hipMemcpy(devNeighbors, &_neighbors[0], neighborsBytes, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy neighbors to dev failed");
hipLaunchKernelGGL(( checkForSeeds), dim3(1), dim3(1), 0, 0, devPoints, _cloud->size(), devNeighbors, _neighbors.size(), devNotUsed, _index0);
return true;
}
| 6b0bddf22c3f919f4b213f57668b96c0498a62cc.cu | /**
* Author: rodrigo
* 2015
*/
#include "CudaUtil.h"
#include <ostream>
#include <iostream>
#define BLOCKS 20
#define THREADS 256
// Pointer to memory in device
struct Point;
Point *devPoints = NULL;
bool *devNotUsed = NULL;
struct BallCenter
{
float cx, cy, cz;
int idx1, idx2, idx3;
bool isValid;
__device__ BallCenter(const int _idx1, const int _idx2, const int _idx3)
{
idx1 = _idx1;
idx2 = _idx2;
idx3 = _idx3;
cx = cy = cz = 0;
isValid = false;
}
};
struct Point
{
float x, y, z, w;
float nx, ny, nz, nw;
float c;
float fill[3];
__device__ Point operator-(const Point &_p)
{
Point result;
result.x = x - _p.x;
result.y = y - _p.y;
result.z = z - _p.z;
return result;
}
__device__ double sqrDist(const Point &_p) const
{
double dx = x - _p.x;
double dy = y - _p.y;
double dz = z - _p.z;
return dx * dx + dy * dy + dz * dz;
}
__device__ double dist(const Point &_p) const
{
double dx = x - _p.x;
double dy = y - _p.y;
double dz = z - _p.z;
return sqrt(dx * dx + dy * dy + dz * dz);
}
};
std::ostream &operator<<(std::ostream &_stream, const BallCenter &_center)
{
_stream << "c=(" << _center.cx << ", " << _center.cy << ", " << _center.cz << ") / (" << _center.idx1 << ", " << _center.idx2 << ", " << _center.idx3 << ")";
return _stream;
}
void CudaUtil::allocPoints(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud)
{
size_t cloudBytes = sizeof(pcl::PointNormal) * _cloud->size();
cudaMalloc((void **) &devPoints, cloudBytes);
cudaCheckErrors("cudaMalloc points failed");
cudaMemcpy(devPoints, &_cloud->points[0], cloudBytes, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy points to dev failed");
}
void CudaUtil::allocUsed(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud, const bool* _notUsed)
{
size_t bytes = sizeof(bool) * _cloud->size();
cudaMalloc((void **) &devNotUsed, bytes);
cudaCheckErrors("cudaMalloc notUsed failed");
cudaMemset(devNotUsed, 0, bytes);
cudaCheckErrors("cudaMemset notUsed failed");
}
__global__ void searchCloserPoints(const int _target, const Point *_points, const int _pointNumber, const double _searchRadius, const int _pointsPerThread, bool *_selected)
{
int startIdx = (blockIdx.x * blockDim.x + threadIdx.x) * _pointsPerThread;
double sqrRadius = _searchRadius * _searchRadius;
for (int i = startIdx; i < startIdx + _pointsPerThread && i < _pointNumber; i++)
{
_selected[i] = _points[_target].sqrDist(_points[i]) < sqrRadius;
}
}
bool CudaUtil::radiusSearch(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud, const int _target, double _radius, std::vector<int> &_idxs)
{
int blocks = 10;
int threads = 256;
size_t cloudSize = _cloud->size();
// Copy points to device
if (devPoints == NULL)
allocPoints(_cloud);
// Array to store points within radius
bool *devSelected;
cudaMalloc((void **) &devSelected, sizeof(bool) * cloudSize);
cudaCheckErrors("cudaMalloc selected failed");
// Calculate adequate number of blocks and threads
while (cloudSize / blocks < 2)
blocks /= 2;
int pointsPerBlock = ceil((double) cloudSize / blocks);
while (pointsPerBlock / threads < 1)
threads /= 2;
int pointsPerThread = ceil((double) pointsPerBlock / threads);
// Execute kernel
searchCloserPoints<<<blocks, threads>>>(_target, devPoints, cloudSize, _radius, pointsPerThread, devSelected);
// Copy data to host
bool *selected = (bool *) calloc(cloudSize, sizeof(bool));
cudaMemcpy(selected, devSelected, sizeof(bool) * cloudSize, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy selected failed");
//cudaFree(devSelected);
//cudaCheckErrors("cudaFree selected failed");
for (size_t i = 0; i < cloudSize; i++)
if (selected[i])
_idxs.push_back(i);
free(selected);
return true;
}
///////////////////////////////
__device__ bool getBallCenter(const Point *_p1, const Point *_p2, const Point *_p3, BallCenter *_center)
{
return false;
}
__global__ void checkForSeeds(const Point *_points, const int _pointNumber, const int *_neighbors, const int _neighborsSize, const bool *_notUsed, const int _index0)
{
int startIdx = 0; //(blockIdx.x * blockDim.x + threadIdx.x) * _pointsPerThread;
int endIdx = 0; // compute this
__shared__ bool found;
found = false;
//__syncthreads();
for (int j = startIdx; j < endIdx && j < _neighborsSize; j++)
{
if (!found)
{
int index1 = _neighbors[j];
// Skip invalid combinations
if (index1 == _index0 || !_notUsed[index1])
continue;
for (size_t k = 0; k < _neighborsSize && !found; k++)
{
int index2 = _neighbors[k];
// Skip invalid combinations
if (index1 == index2 || index2 == _index0 || !_notUsed[index2])
continue;
BallCenter center(_index0, index1, index2);
if (!found && getBallCenter(&_points[_index0], &_points[index1], &_points[index2], &center))
{
// pcl::PointNormal ballCenter = Helper::makePointNormal(center);
// std::vector<int> neighborhood = getNeighbors(ballCenter, ballRadius);
// if (!found && isEmpty(neighborhood, index0, index1, index2, center))
// {
// if (!found)
// {
//
// THIS HAS TO BE DONE
// INSIDE A MUTEX-PROTECTED
// BLOCK!
//
// seed = TrianglePtr(new Triangle(cloud->at((int) sequence[0]), cloud->at((int) sequence[1]), cloud->at((int) sequence[2]), sequence[0], sequence[1], sequence[2], ballCenter, ballRadius));
// devNotUsed.erase(index0);
// devNotUsed.erase(index1);
// devNotUsed.erase(index2);
//
// found = true;
//
// break;
// }
// }
}
}
}
}
}
bool CudaUtil::findSeed(const pcl::PointCloud<pcl::PointNormal>::Ptr &_cloud, const std::vector<int> &_neighbors, const bool *_notUsed, const int _index0)
{
int blocks = 10;
int threads = 256;
size_t cloudSize = _cloud->size();
// Prepare memory buffers
if (devPoints == NULL)
allocPoints(_cloud);
if (devNotUsed == NULL)
allocUsed(_cloud, _notUsed);
// Copy not used data to dev
size_t notUsedBytes = sizeof(bool) * _cloud->size();
cudaMemcpy(devNotUsed, _notUsed, notUsedBytes, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy notUsed to dev failed");
// Create and prepare buffer with neighbors indices
int *devNeighbors;
size_t neighborsBytes = sizeof(int) * _neighbors.size();
cudaMalloc((void **) &devNeighbors, neighborsBytes);
cudaCheckErrors("cudaMalloc neighbors failed");
cudaMemcpy(devNeighbors, &_neighbors[0], neighborsBytes, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy neighbors to dev failed");
checkForSeeds<<<1, 1>>>(devPoints, _cloud->size(), devNeighbors, _neighbors.size(), devNotUsed, _index0);
return true;
}
|
21797d198ef2bc33fdc7abff36b66f86ff456b3e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <iostream>
#include <raft/label/classlabels.cuh>
#include <raft/random/make_blobs.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <selection/knn.cuh>
#include <vector>
namespace MLCommon {
namespace Selection {
struct KNNClassifyInputs {
int rows;
int cols;
int n_labels;
float cluster_std;
int k;
};
class KNNClassifyTest : public ::testing::TestWithParam<KNNClassifyInputs> {
public:
KNNClassifyTest()
: params(::testing::TestWithParam<KNNClassifyInputs>::GetParam()),
stream(handle.get_stream()),
train_samples(params.rows * params.cols, stream),
train_labels(params.rows, stream),
pred_labels(params.rows, stream),
knn_indices(params.rows * params.k, stream),
knn_dists(params.rows * params.k, stream)
{
basicTest();
}
protected:
void basicTest()
{
raft::random::make_blobs<float, int>(train_samples.data(),
train_labels.data(),
params.rows,
params.cols,
params.n_labels,
stream,
true,
nullptr,
nullptr,
params.cluster_std);
rmm::device_uvector<int> unique_labels(0, stream);
auto n_classes =
raft::label::getUniquelabels(unique_labels, train_labels.data(), params.rows, stream);
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = train_samples.data();
sizes[0] = params.rows;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
params.cols,
train_samples.data(),
params.rows,
knn_indices.data(),
knn_dists.data(),
params.k);
std::vector<int*> y;
y.push_back(train_labels.data());
std::vector<int*> uniq_labels;
uniq_labels.push_back(unique_labels.data());
std::vector<int> n_unique;
n_unique.push_back(n_classes);
knn_classify(handle,
pred_labels.data(),
knn_indices.data(),
y,
params.rows,
params.rows,
params.k,
uniq_labels,
n_unique);
handle.sync_stream(stream);
}
protected:
KNNClassifyInputs params;
raft::handle_t handle;
hipStream_t stream;
rmm::device_uvector<float> train_samples;
rmm::device_uvector<int> train_labels;
rmm::device_uvector<int> pred_labels;
rmm::device_uvector<int64_t> knn_indices;
rmm::device_uvector<float> knn_dists;
};
typedef KNNClassifyTest KNNClassifyTestF;
TEST_P(KNNClassifyTestF, Fit)
{
ASSERT_TRUE(
devArrMatch(train_labels.data(), pred_labels.data(), params.rows, MLCommon::Compare<int>()));
}
const std::vector<KNNClassifyInputs> inputsf = {{100, 10, 2, 0.01f, 2},
{1000, 10, 5, 0.01f, 2},
{10000, 10, 5, 0.01f, 2},
{100, 10, 2, 0.01f, 10},
{1000, 10, 5, 0.01f, 10},
{10000, 10, 5, 0.01f, 10},
{100, 10, 2, 0.01f, 50},
{1000, 10, 5, 0.01f, 50},
{10000, 10, 5, 0.01f, 50}};
INSTANTIATE_TEST_CASE_P(KNNClassifyTest, KNNClassifyTestF, ::testing::ValuesIn(inputsf));
}; // end namespace Selection
}; // namespace MLCommon
| 21797d198ef2bc33fdc7abff36b66f86ff456b3e.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <iostream>
#include <raft/label/classlabels.cuh>
#include <raft/random/make_blobs.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <selection/knn.cuh>
#include <vector>
namespace MLCommon {
namespace Selection {
struct KNNClassifyInputs {
int rows;
int cols;
int n_labels;
float cluster_std;
int k;
};
class KNNClassifyTest : public ::testing::TestWithParam<KNNClassifyInputs> {
public:
KNNClassifyTest()
: params(::testing::TestWithParam<KNNClassifyInputs>::GetParam()),
stream(handle.get_stream()),
train_samples(params.rows * params.cols, stream),
train_labels(params.rows, stream),
pred_labels(params.rows, stream),
knn_indices(params.rows * params.k, stream),
knn_dists(params.rows * params.k, stream)
{
basicTest();
}
protected:
void basicTest()
{
raft::random::make_blobs<float, int>(train_samples.data(),
train_labels.data(),
params.rows,
params.cols,
params.n_labels,
stream,
true,
nullptr,
nullptr,
params.cluster_std);
rmm::device_uvector<int> unique_labels(0, stream);
auto n_classes =
raft::label::getUniquelabels(unique_labels, train_labels.data(), params.rows, stream);
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = train_samples.data();
sizes[0] = params.rows;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
params.cols,
train_samples.data(),
params.rows,
knn_indices.data(),
knn_dists.data(),
params.k);
std::vector<int*> y;
y.push_back(train_labels.data());
std::vector<int*> uniq_labels;
uniq_labels.push_back(unique_labels.data());
std::vector<int> n_unique;
n_unique.push_back(n_classes);
knn_classify(handle,
pred_labels.data(),
knn_indices.data(),
y,
params.rows,
params.rows,
params.k,
uniq_labels,
n_unique);
handle.sync_stream(stream);
}
protected:
KNNClassifyInputs params;
raft::handle_t handle;
cudaStream_t stream;
rmm::device_uvector<float> train_samples;
rmm::device_uvector<int> train_labels;
rmm::device_uvector<int> pred_labels;
rmm::device_uvector<int64_t> knn_indices;
rmm::device_uvector<float> knn_dists;
};
typedef KNNClassifyTest KNNClassifyTestF;
TEST_P(KNNClassifyTestF, Fit)
{
ASSERT_TRUE(
devArrMatch(train_labels.data(), pred_labels.data(), params.rows, MLCommon::Compare<int>()));
}
const std::vector<KNNClassifyInputs> inputsf = {{100, 10, 2, 0.01f, 2},
{1000, 10, 5, 0.01f, 2},
{10000, 10, 5, 0.01f, 2},
{100, 10, 2, 0.01f, 10},
{1000, 10, 5, 0.01f, 10},
{10000, 10, 5, 0.01f, 10},
{100, 10, 2, 0.01f, 50},
{1000, 10, 5, 0.01f, 50},
{10000, 10, 5, 0.01f, 50}};
INSTANTIATE_TEST_CASE_P(KNNClassifyTest, KNNClassifyTestF, ::testing::ValuesIn(inputsf));
}; // end namespace Selection
}; // namespace MLCommon
|
117f0595010872bbb38f299a0ee63678ba888718.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/bits/unpackbits.h"
#include "cunumeric/bits/unpackbits_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
using namespace legate;
template <typename UnpackOP, typename WriteAcc, typename ReadAcc, typename Pitches, typename Point>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
generic_kernel(size_t volume,
UnpackOP unpack,
WriteAcc out,
ReadAcc in,
Pitches in_pitches,
Point in_lo,
uint32_t axis)
{
const size_t idx = global_tid_1d();
if (idx >= volume) return;
auto in_p = in_pitches.unflatten(idx, in_lo);
unpack(out, in, in_p, axis);
}
template <int32_t DIM, Bitorder BITORDER>
struct UnpackbitsImplBody<VariantKind::GPU, DIM, BITORDER> {
void operator()(const AccessorWO<uint8_t, DIM>& out,
const AccessorRO<uint8_t, DIM>& in,
const Rect<DIM>& in_rect,
const Pitches<DIM - 1>& in_pitches,
size_t in_volume,
uint32_t axis) const
{
Unpack<BITORDER> unpack{};
auto stream = get_cached_stream();
const size_t blocks = (in_volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( generic_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream,
in_volume, unpack, out, in, in_pitches, in_rect.lo, axis);
CHECK_CUDA_STREAM(stream);
}
};
/*static*/ void UnpackbitsTask::gpu_variant(TaskContext& context)
{
unpackbits_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
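// Reference example (added for clarity; the exact indexing lives in the
// Unpack<BITORDER> functor in unpackbits.h, not shown here): following
// numpy.unpackbits semantics, one input byte 0xB2 = 0b10110010 expands to
//   bitorder "big"    -> 1, 0, 1, 1, 0, 0, 1, 0
//   bitorder "little" -> 0, 1, 0, 0, 1, 1, 0, 1
// written as eight output bytes along the given axis.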
| 117f0595010872bbb38f299a0ee63678ba888718.cu | /* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/bits/unpackbits.h"
#include "cunumeric/bits/unpackbits_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
using namespace legate;
template <typename UnpackOP, typename WriteAcc, typename ReadAcc, typename Pitches, typename Point>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
generic_kernel(size_t volume,
UnpackOP unpack,
WriteAcc out,
ReadAcc in,
Pitches in_pitches,
Point in_lo,
uint32_t axis)
{
const size_t idx = global_tid_1d();
if (idx >= volume) return;
auto in_p = in_pitches.unflatten(idx, in_lo);
unpack(out, in, in_p, axis);
}
template <int32_t DIM, Bitorder BITORDER>
struct UnpackbitsImplBody<VariantKind::GPU, DIM, BITORDER> {
void operator()(const AccessorWO<uint8_t, DIM>& out,
const AccessorRO<uint8_t, DIM>& in,
const Rect<DIM>& in_rect,
const Pitches<DIM - 1>& in_pitches,
size_t in_volume,
uint32_t axis) const
{
Unpack<BITORDER> unpack{};
auto stream = get_cached_stream();
const size_t blocks = (in_volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
generic_kernel<<<blocks, THREADS_PER_BLOCK, 0, stream>>>(
in_volume, unpack, out, in, in_pitches, in_rect.lo, axis);
CHECK_CUDA_STREAM(stream);
}
};
/*static*/ void UnpackbitsTask::gpu_variant(TaskContext& context)
{
unpackbits_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
|
8b5d6641ebf80ee59ffa942a6413324c065fcff5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "ParticleData.cuh"
#include "kernels/scan.cuh"
/*! \file ParticleGroup.cu
\brief Contains GPU kernel code used by ParticleGroup
*/
//! GPU kernel to translate between global and local membership lookup table
__global__ void gpu_rebuild_index_list_kernel(unsigned int N,
unsigned int *d_tag,
unsigned char *d_is_member_tag,
unsigned char *d_is_member)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
unsigned int tag = d_tag[idx];
d_is_member[idx] = d_is_member_tag[tag];
}
__global__ void gpu_scatter_member_indices(unsigned int N,
const unsigned int *d_scan,
const unsigned char *d_is_member,
unsigned *d_member_idx)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx >= N) return;
if (d_is_member[idx])
d_member_idx[d_scan[idx]] = idx;
}
//! GPU method for rebuilding the index list of a ParticleGroup
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
hipError_t gpu_rebuild_index_list(unsigned int N,
unsigned char *d_is_member_tag,
unsigned char *d_is_member,
unsigned int *d_member_idx,
unsigned int *d_tag,
unsigned int &num_local_members,
unsigned int *d_tmp,
mgpu::ContextPtr mgpu_context)
{
assert(d_is_member);
assert(d_is_member_tag);
assert(d_member_idx);
assert(d_tag);
unsigned int block_size = 512;
unsigned int n_blocks = N/block_size + 1;
hipLaunchKernelGGL(( gpu_rebuild_index_list_kernel), dim3(n_blocks),dim3(block_size), 0, 0, N,
d_tag,
d_is_member_tag,
d_is_member);
// compute member_idx offsets
mgpu::Scan<mgpu::MgpuScanTypeExc>(d_is_member, N, (unsigned int) 0, mgpu::plus<unsigned int>(),
(unsigned int *) NULL, &num_local_members, d_tmp, *mgpu_context);
// fill member_idx array
hipLaunchKernelGGL(( gpu_scatter_member_indices), dim3(n_blocks), dim3(block_size), 0, 0, N, d_tmp, d_is_member, d_member_idx);
return hipSuccess;
}
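// Worked example of the flag -> scan -> scatter pipeline above, for five local
// particles (values are illustrative):
//   d_is_member    = [1, 0, 1, 1, 0]
//   exclusive scan = [0, 1, 1, 2, 3], num_local_members = 3
//   gpu_scatter_member_indices then writes d_member_idx = [0, 2, 3]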
| 8b5d6641ebf80ee59ffa942a6413324c065fcff5.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "ParticleData.cuh"
#include "kernels/scan.cuh"
/*! \file ParticleGroup.cu
\brief Contains GPU kernel code used by ParticleGroup
*/
//! GPU kernel to translate between global and local membership lookup table
__global__ void gpu_rebuild_index_list_kernel(unsigned int N,
unsigned int *d_tag,
unsigned char *d_is_member_tag,
unsigned char *d_is_member)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
unsigned int tag = d_tag[idx];
d_is_member[idx] = d_is_member_tag[tag];
}
__global__ void gpu_scatter_member_indices(unsigned int N,
const unsigned int *d_scan,
const unsigned char *d_is_member,
unsigned *d_member_idx)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx >= N) return;
if (d_is_member[idx])
d_member_idx[d_scan[idx]] = idx;
}
//! GPU method for rebuilding the index list of a ParticleGroup
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
cudaError_t gpu_rebuild_index_list(unsigned int N,
unsigned char *d_is_member_tag,
unsigned char *d_is_member,
unsigned int *d_member_idx,
unsigned int *d_tag,
unsigned int &num_local_members,
unsigned int *d_tmp,
mgpu::ContextPtr mgpu_context)
{
assert(d_is_member);
assert(d_is_member_tag);
assert(d_member_idx);
assert(d_tag);
unsigned int block_size = 512;
unsigned int n_blocks = N/block_size + 1;
gpu_rebuild_index_list_kernel<<<n_blocks,block_size>>>(N,
d_tag,
d_is_member_tag,
d_is_member);
// compute member_idx offsets
mgpu::Scan<mgpu::MgpuScanTypeExc>(d_is_member, N, (unsigned int) 0, mgpu::plus<unsigned int>(),
(unsigned int *) NULL, &num_local_members, d_tmp, *mgpu_context);
// fill member_idx array
gpu_scatter_member_indices<<<n_blocks, block_size>>>(N, d_tmp, d_is_member, d_member_idx);
return cudaSuccess;
}
|
34266806802f96f979744403e267c3c2c0ad686a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <assert.h>
extern "C" {
#include "blas.h"
#include "hip/hip_runtime.h"
#include "utils.h"
}
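// The bias/scale kernels below treat output as a (batch, n, size) tensor: n filters
// per batch item and size spatial elements per filter; scale_bias multiplies each
// filter's spatial block by the scalar biases[filter].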
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) {
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) {
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b=0; b<batch; ++b) {
for(i=0; i<size; i+=BLOCK) {
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) {
hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates);
check_error(hipPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size) {
int num = n*size*batch;
hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size);
check_error(hipPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b) {
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) {
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b=0; b<batch; ++b) {
for(i=0; i<size; i += BLOCK) {
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) {
if(size == 1){
hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n);
}else{
hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size);
}
check_error(hipPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(hipPeekAtLastError());
}
*/
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) {
hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t);
check_error(hipPeekAtLastError());
}
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) {
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
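/* The BLAS-call sequence above implements, element-wise (d holds the accumulated
   gradient for this batch):
       d <- d - decay*batch*w                       (weight decay folded into d)
       m <- B1*m + (1-B1)*d
       v <- B2*v + (1-B2)*d*d
       w <- w + rate * (m/(1-B1^t)) / (sqrt(v/(1-B2^t)) + eps)
       d <- 0
*/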
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) {
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(hipPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j=0; j<batch; ++j) {
for(k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum) {
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k=0; k<n; ++k) {
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0) {
mean_delta[filter] = 0;
for(i=0; i<threads; ++i) {
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0) {
variance_delta[filter] = 0;
for(i=0; i<threads; ++i) {
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j=0; j<batch; ++j) {
for (k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) {
hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(hipPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) {
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j=0; j<batch; ++j) {
for(k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j=0; j<batch; ++j) {
for(k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
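// reorg_kernel remaps a (batch, c, h, w) tensor to (batch, c/(stride*stride),
// h*stride, w*stride): every group of stride*stride input channels is spread out
// spatially; the forward flag selects the direction of the copy.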
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) {
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial);
check_error(hipPeekAtLastError());
}
__global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int b = index / spatial;
int i = index % spatial;
int f;
float sum = 0;
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
sum += powf(x[index], 2);
}
sum = sqrtf(sum);
if(sum == 0) sum = 1;
//printf("%f\n", sum);
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
x[index] /= sum;
dx[index] = (1 - x[index]) / sum;
}
}
extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) {
size_t N = batch*spatial;
hipLaunchKernelGGL(( l2norm_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, dx, batch, filters, spatial);
check_error(hipPeekAtLastError());
}
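// The fast_* statistics kernels below launch one thread block per filter: each
// thread accumulates a strided partial sum into shared memory, then thread 0
// folds the BLOCK partial sums into the per-filter result.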
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0) {
mean[filter] = 0;
for(i=0; i<threads; ++i) {
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id==0) {
variance[filter] = 0;
for(i=0; i<threads; ++i) {
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) {
hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) {
hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) {
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) {
hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) {
hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) {
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) {
hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) {
hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
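// flatten_kernel transposes each batch item between channel-major (layers x spatial)
// and interleaved (spatial x layers) storage; forward selects which layout is written.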
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) {
int size = spatial*batch*layers;
hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out);
check_error(hipPeekAtLastError());
}
extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) {
int size = w*h*c*batch;
hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out);
check_error(hipPeekAtLastError());
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = val;
}
extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) {
hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, val);
check_error(hipPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) {
hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale);
check_error(hipPeekAtLastError());
}
extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) {
hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) {
hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) {
hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) {
hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) {
hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) {
hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
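// shortcut_kernel blends a source layer into an output layer of (possibly) different
// shape over their overlapping region: minw/minh/minc is the overlap, stride steps
// through the larger input, sample through the larger output, and the result is
// out = s1*out + s2*add.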
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1,
int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = s1*out[out_index] + s2*add[add_index];
//out[out_index] += add[add_index];
}
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) {
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out);
check_error(hipPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = fabsf(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
} else {
error[i] = 2*abs_val - 1;
delta[i] = (diff > 0) ? 1 : -1;
}
}
}
extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) {
hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) {
hipLaunchKernelGGL(( softmax_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float t = truth[i];
float p = pred[i];
error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001);
delta[i] = t-p;
}
}
extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) {
hipLaunchKernelGGL(( logistic_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) {
hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float diff = truth[i] - pred[i];
error[i] = abs(diff);
delta[i] = (diff > 0) ? 1 : -1;
}
}
extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) {
hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
error[i] = truth[i] ? -pred[i] : pred[i];
delta[i] = (truth[i] > 0) ? 1 : -1;
}
}
extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) {
hipLaunchKernelGGL(( wgan_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B) {
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
if(X) X[b*NX + j] += OUT[i];
} else {
if(Y) Y[b*NY + j - NX] += OUT[i];
}
}
}
extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) {
hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT);
check_error(hipPeekAtLastError());
}
__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B) {
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
OUT[i] = X[b*NX + j];
} else {
OUT[i] = Y[b*NY + j - NX];
}
}
}
extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) {
hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT);
check_error(hipPeekAtLastError());
}
extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) {
hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c);
check_error(hipPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
if(da) da[i] += dc[i] * s[i];
if(db) db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * (a[i] - b[i]);
}
}
extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) {
hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc);
check_error(hipPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
c[i] += a[i]*b[i];
}
}
extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) {
hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c);
check_error(hipPeekAtLastError());
}
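// Numerically stable softmax over n elements with the given stride: the running
// maximum is subtracted inside expf so that large logits cannot overflow.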
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output) {
int i;
float sum = 0;
float largest = -INFINITY;
for(i=0; i<n; ++i) {
        float val = input[i*stride];   // float, not int: truncating would corrupt the maximum used for stabilization
largest = (val>largest) ? val : largest;
}
for(i=0; i<n; ++i) {
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i=0; i<n; ++i) {
output[i*stride] /= sum;
}
}
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g]*spatial;
int boff = b*stride;
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) {
int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
hipLaunchKernelGGL(( softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
check_error(hipPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) {
hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(hipPeekAtLastError());
}
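// upsample_kernel does nearest-neighbor upsampling by an integer stride. Forward,
// each output element accumulates scale*x from its source element; backward, several
// outputs map to the same input element, so the gradient is combined with atomicAdd.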
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) {
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int out_index = i;
int out_w = i%(w*stride);
i = i/(w*stride);
int out_h = i%(h*stride);
i = i/(h*stride);
int out_c = i%c;
i = i/c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if(forward) out[out_index] += scale * x[in_index];
else atomicAdd(x+in_index, scale * out[out_index]);
}
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) {
size_t size = w*h*c*batch*stride*stride;
hipLaunchKernelGGL(( upsample_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, in, w, h, c, batch, stride, forward, scale, out);
check_error(hipPeekAtLastError());
}
| 34266806802f96f979744403e267c3c2c0ad686a.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <assert.h>
extern "C" {
#include "blas.h"
#include "cuda.h"
#include "utils.h"
}
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) {
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) {
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b=0; b<batch; ++b) {
for(i=0; i<size; i+=BLOCK) {
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) {
backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
check_error(cudaPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size) {
int num = n*size*batch;
add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b) {
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) {
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b=0; b<batch; ++b) {
for(i=0; i<size; i += BLOCK) {
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) {
if(size == 1){
backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n);
}else{
backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
}
check_error(cudaPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(cudaPeekAtLastError());
}
*/
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) {
adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t);
check_error(cudaPeekAtLastError());
}
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) {
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) {
size_t N = batch*filters*spatial;
normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(cudaPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j=0; j<batch; ++j) {
for(k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum) {
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k=0; k<n; ++k) {
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0) {
mean_delta[filter] = 0;
for(i=0; i<threads; ++i) {
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0) {
variance_delta[filter] = 0;
for(i=0; i<threads; ++i) {
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j=0; j<batch; ++j) {
for (k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) {
fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) {
fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(cudaPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) {
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j=0; j<batch; ++j) {
for(k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j=0; j<batch; ++j) {
for(k=0; k<spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) {
size_t N = batch*filters*spatial;
normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
__global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) {
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int b = index / spatial;
int i = index % spatial;
int f;
float sum = 0;
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
sum += powf(x[index], 2);
}
sum = sqrtf(sum);
if(sum == 0) sum = 1;
//printf("%f\n", sum);
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
x[index] /= sum;
dx[index] = (1 - x[index]) / sum;
}
}
extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) {
size_t N = batch*spatial;
l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0) {
mean[filter] = 0;
for(i=0; i<threads; ++i) {
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j=0; j<batch; ++j) {
for(i=0; i<spatial; i+=threads) {
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id==0) {
variance[filter] = 0;
for(i=0; i<threads; ++i) {
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) {
fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) {
mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) {
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) {
pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) {
axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) {
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) {
mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) {
copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) {
int size = spatial*batch*layers;
flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
check_error(cudaPeekAtLastError());
}
extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) {
int size = w*h*c*batch;
reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
check_error(cudaPeekAtLastError());
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = val;
}
extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) {
mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val);
check_error(cudaPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) {
scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale);
check_error(cudaPeekAtLastError());
}
extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) {
const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) {
constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) {
add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) {
scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) {
supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) {
fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1,
int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = s1*out[out_index] + s2*add[add_index];
//out[out_index] += add[add_index];
}
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) {
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out);
check_error(cudaPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = fabsf(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
} else {
error[i] = 2*abs_val - 1;
delta[i] = (diff > 0) ? 1 : -1;
}
}
}
extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) {
smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) {
softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float t = truth[i];
float p = pred[i];
error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001);
delta[i] = t-p;
}
}
extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) {
logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) {
l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
float diff = truth[i] - pred[i];
error[i] = abs(diff);
delta[i] = (diff > 0) ? 1 : -1;
}
}
extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) {
l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
error[i] = truth[i] ? -pred[i] : pred[i];
delta[i] = (truth[i] > 0) ? 1 : -1;
}
}
extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) {
wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B) {
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
if(X) X[b*NX + j] += OUT[i];
} else {
if(Y) Y[b*NY + j - NX] += OUT[i];
}
}
}
extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) {
deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
check_error(cudaPeekAtLastError());
}
__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B) {
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
OUT[i] = X[b*NX + j];
} else {
OUT[i] = Y[b*NY + j - NX];
}
}
}
extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) {
inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
check_error(cudaPeekAtLastError());
}
extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) {
weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
check_error(cudaPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
if(da) da[i] += dc[i] * s[i];
if(db) db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * (a[i] - b[i]);
}
}
extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) {
weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
check_error(cudaPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) {
c[i] += a[i]*b[i];
}
}
extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) {
mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
check_error(cudaPeekAtLastError());
}
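// Numerically stable softmax over n strided elements with temperature `temp`:
// the maximum is subtracted before exponentiation, then the result is normalised.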
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output) {
int i;
float sum = 0;
float largest = -INFINITY;
for(i=0; i<n; ++i) {
        float val = input[i*stride];  // read as float so the running maximum is not truncated
largest = (val>largest) ? val : largest;
}
for(i=0; i<n; ++i) {
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i=0; i<n; ++i) {
output[i*stride] /= sum;
}
}
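// Hierarchical (tree) softmax: each thread handles one (spatial position, group,
// batch) triple and applies softmax_device to that group's slice of the input.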
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g]*spatial;
int boff = b*stride;
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) {
int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
check_error(cudaPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) {
softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(cudaPeekAtLastError());
}
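// Nearest-neighbour upsampling by a factor of `stride`: the forward pass accumulates
// scale*x into the enlarged output, the backward pass (forward == 0) atomically adds
// the gradient back into x.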
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) {
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int out_index = i;
int out_w = i%(w*stride);
i = i/(w*stride);
int out_h = i%(h*stride);
i = i/(h*stride);
int out_c = i%c;
i = i/c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if(forward) out[out_index] += scale * x[in_index];
else atomicAdd(x+in_index, scale * out[out_index]);
}
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) {
size_t size = w*h*c*batch*stride*stride;
upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out);
check_error(cudaPeekAtLastError());
}
|
816ad6ac91608c90c57333585fc62c7218cfcf49.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "../Extra/helper_functions.h" // includes cuda.h and hip/hip_runtime_api.h
// CUDA helper functions
#include "../Extra/helper_cuda.h" // helper functions for CUDA error check
#include<iostream>
#include "../include/StableFluid/StableFluidKernels.h"
float t = 0.0f;
surface<void, cudaSurfaceType2D> surfRef;
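// Fills the 2D surface bound to surfRef with a time-animated colour gradient,
// one thread per texel.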
__global__ void TextureUpdate(int width, int height,float time)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
float4 colour = { 0.0f,1.0f,1.0f,1.0f };
float xx =(float) x / (float)width;
float yy = (float)y / (float)height;
colour.x = 0.5f + 0.5f * sinf( xx + 0 + time);
colour.y = 0.5f + 0.5f * sinf( yy + 2 + time);
colour.z = 0.5f + 0.5f * sinf( xx + 4 + time);
surf2Dwrite(colour, surfRef, x * sizeof(float4), y);
}
void WashColor(hipArray_t array, int width, int height, float delT)
{
t += delT;
checkCudaErrors(hipBindSurfaceToArray(surfRef, array));
dim3 block(16, 16);
dim3 grid(width/block.x, height/block.y);
hipLaunchKernelGGL(( TextureUpdate) , dim3(grid),dim3(block) , 0, 0, width,height,t);
}
| 816ad6ac91608c90c57333585fc62c7218cfcf49.cu | #include <cuda_runtime.h>
#include <cuda.h>
#include "../Extra/helper_functions.h" // includes cuda.h and cuda_runtime_api.h
// CUDA helper functions
#include "../Extra/helper_cuda.h" // helper functions for CUDA error check
#include<iostream>
#include "../include/StableFluid/StableFluidKernels.h"
float t = 0.0f;
surface<void, cudaSurfaceType2D> surfRef;
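// Fills the 2D surface bound to surfRef with a time-animated colour gradient,
// one thread per texel.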
__global__ void TextureUpdate(int width, int height,float time)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
float4 colour = { 0.0f,1.0f,1.0f,1.0f };
float xx =(float) x / (float)width;
float yy = (float)y / (float)height;
colour.x = 0.5f + 0.5f * sinf( xx + 0 + time);
colour.y = 0.5f + 0.5f * sinf( yy + 2 + time);
colour.z = 0.5f + 0.5f * sinf( xx + 4 + time);
surf2Dwrite(colour, surfRef, x * sizeof(float4), y);
}
void WashColor(cudaArray_t array, int width, int height, float delT)
{
t += delT;
checkCudaErrors(cudaBindSurfaceToArray(surfRef, array));
dim3 block(16, 16);
dim3 grid(width/block.x, height/block.y);
TextureUpdate <<< grid,block >>>(width,height,t);
}
|
05e9134fc8bc2a8bebc731aad2df5562c309f960.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
int recursiveReduce(int *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
for (int i = 0; i < stride; i++)
{
data[i] += data[i + stride];
}
return recursiveReduce(data, stride);
}
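// Neighbored-pair reduction: in each iteration only threads whose index is a
// multiple of 2*stride do work, so warps diverge heavily.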
__global__ void reduceDivergence (int *g_idata, int *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
if (idx >= n) return;
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
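// Same reduction, but the active threads are remapped to contiguous indices
// (index = 2*stride*tid), keeping whole warps busy and reducing divergence.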
__global__ void reduceDivergenceLess (int *g_idata, int *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
if(idx >= n) return;
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
int main(int argc, char **argv)
{
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
int size = 1 << 28;
printf(" with array size %d ", size);
    int blocksize = 512;   // default block size; argv[1] overrides it below when provided
if(argc > 1)
{
blocksize = atoi(argv[1]);
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
for (int i = 0; i < size; i++)
{
h_idata[i] = (int)( rand() & 0xFF );
}
memcpy (tmp, h_idata, bytes);
double iStart, iElaps;
int gpu_sum = 0;
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **) &d_idata, bytes));
CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
iStart = seconds();
int cpu_sum = recursiveReduce (tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceDivergence), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Divergence elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceDivergenceLess), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Divergence2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
free(h_idata);
free(h_odata);
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
CHECK(hipDeviceReset());
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
| 05e9134fc8bc2a8bebc731aad2df5562c309f960.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
int recursiveReduce(int *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
for (int i = 0; i < stride; i++)
{
data[i] += data[i + stride];
}
return recursiveReduce(data, stride);
}
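// Neighbored-pair reduction: in each iteration only threads whose index is a
// multiple of 2*stride do work, so warps diverge heavily.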
__global__ void reduceDivergence (int *g_idata, int *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
if (idx >= n) return;
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
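// Same reduction, but the active threads are remapped to contiguous indices
// (index = 2*stride*tid), keeping whole warps busy and reducing divergence.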
__global__ void reduceDivergenceLess (int *g_idata, int *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
if(idx >= n) return;
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
int main(int argc, char **argv)
{
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool bResult = false;
int size = 1 << 28;
printf(" with array size %d ", size);
    int blocksize = 512;   // default block size; argv[1] overrides it below when provided
if(argc > 1)
{
blocksize = atoi(argv[1]);
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
for (int i = 0; i < size; i++)
{
h_idata[i] = (int)( rand() & 0xFF );
}
memcpy (tmp, h_idata, bytes);
double iStart, iElaps;
int gpu_sum = 0;
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(cudaMalloc((void **) &d_idata, bytes));
CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
iStart = seconds();
int cpu_sum = recursiveReduce (tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceDivergence<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Divergence elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceDivergenceLess<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Divergence2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
free(h_idata);
free(h_odata);
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
CHECK(cudaDeviceReset());
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
|
a2692d83e7db6d12e75377c9aea5bf86a1c69fd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <numeric>
#include <stdlib.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
__global__ void reciprocalKernel(float *data, unsigned vectorSize) {
unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < vectorSize)
data[idx] = 1.0/data[idx];
}
/**
* Host function that copies the data and launches the work on GPU
*/
float *gpuReciprocal(float *data, unsigned size)
{
float *rc = new float[size];
float *gpuData;
CUDA_CHECK_RETURN(hipMalloc((void **)&gpuData, sizeof(float)*size));
CUDA_CHECK_RETURN(hipMemcpy(gpuData, data, sizeof(float)*size, hipMemcpyHostToDevice));
static const int BLOCK_SIZE = 256;
const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE;
hipLaunchKernelGGL(( reciprocalKernel), dim3(blockCount), dim3(BLOCK_SIZE), 0, 0, gpuData, size);
CUDA_CHECK_RETURN(hipMemcpy(rc, gpuData, sizeof(float)*size, hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipFree(gpuData));
return rc;
}
float *cpuReciprocal(float *data, unsigned size)
{
float *rc = new float[size];
for (unsigned cnt = 0; cnt < size; ++cnt) rc[cnt] = 1.0/data[cnt];
return rc;
}
void initialize(float *data, unsigned size)
{
for (unsigned i = 0; i < size; ++i)
data[i] = .5*(i+1);
}
int main(void)
{
static const int WORK_SIZE = 65530;
float *data = new float[WORK_SIZE];
initialize (data, WORK_SIZE);
float *recCpu = cpuReciprocal(data, WORK_SIZE);
float *recGpu = gpuReciprocal(data, WORK_SIZE);
float cpuSum = std::accumulate (recCpu, recCpu+WORK_SIZE, 0.0);
float gpuSum = std::accumulate (recGpu, recGpu+WORK_SIZE, 0.0);
/* Verify the results */
std::cout<<"gpuSum = "<<gpuSum<< " cpuSum = " <<cpuSum<<std::endl;
/* Free memory */
delete[] data;
delete[] recCpu;
delete[] recGpu;
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err)
{
if (err == hipSuccess)
return;
std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
| a2692d83e7db6d12e75377c9aea5bf86a1c69fd2.cu | #include <iostream>
#include <numeric>
#include <stdlib.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
__global__ void reciprocalKernel(float *data, unsigned vectorSize) {
unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < vectorSize)
data[idx] = 1.0/data[idx];
}
/**
* Host function that copies the data and launches the work on GPU
*/
float *gpuReciprocal(float *data, unsigned size)
{
float *rc = new float[size];
float *gpuData;
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*size));
CUDA_CHECK_RETURN(cudaMemcpy(gpuData, data, sizeof(float)*size, cudaMemcpyHostToDevice));
static const int BLOCK_SIZE = 256;
const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE;
reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuData, size);
CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuData, sizeof(float)*size, cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaFree(gpuData));
return rc;
}
float *cpuReciprocal(float *data, unsigned size)
{
float *rc = new float[size];
for (unsigned cnt = 0; cnt < size; ++cnt) rc[cnt] = 1.0/data[cnt];
return rc;
}
void initialize(float *data, unsigned size)
{
for (unsigned i = 0; i < size; ++i)
data[i] = .5*(i+1);
}
int main(void)
{
static const int WORK_SIZE = 65530;
float *data = new float[WORK_SIZE];
initialize (data, WORK_SIZE);
float *recCpu = cpuReciprocal(data, WORK_SIZE);
float *recGpu = gpuReciprocal(data, WORK_SIZE);
float cpuSum = std::accumulate (recCpu, recCpu+WORK_SIZE, 0.0);
float gpuSum = std::accumulate (recGpu, recGpu+WORK_SIZE, 0.0);
/* Verify the results */
std::cout<<"gpuSum = "<<gpuSum<< " cpuSum = " <<cpuSum<<std::endl;
/* Free memory */
delete[] data;
delete[] recCpu;
delete[] recGpu;
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
|
9e453f8ece639b3fcf78fd1307d3247acb564930.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<string>
#include<vector>
#include "dirent.h"
#include<cstring>
#include<unordered_set>
#include<fstream>
#include<stdlib.h>
#include<regex>
#include <unordered_map>
#include <unordered_set>
#include"porter2_stemmer.h"
#include <utility>
#include <omp.h>
#include<chrono>
#include <iomanip>
#include"myheader.h"
std::vector<std::vector<std::string>> Tokenizer(const std::string& doc_name);
void Fill_dataset(std::vector<std::string>& DATASET)
{
struct dirent *de; // Pointer for directory entry
// opendir() returns a pointer of DIR type.
DIR *dr = opendir("../news_docs");
if (dr == NULL) // opendir returns NULL if couldn't open directory
{
printf("Could not open news_docs directory" );
exit(0);
}
// Refer http://pubs.opengroup.org/onlinepubs/7990989775/xsh/readdir.html
// for readdir()
while ((de = readdir(dr)) != NULL)
{
if(strcmp(de->d_name,".") == 0 || strcmp(de->d_name,"..") == 0 )
continue;
DATASET.push_back(std::string("../news_docs/") + std::string(de->d_name));
}
closedir(dr);
}
void Fill_StopWordSet(std::unordered_set<std::string>& STOP_WORD_SET)
{
std::ifstream fin;
fin.open("../STOP_WORDS_LIST.txt");
std::string line;
while(std::getline(fin,line)) //read the file line by line...
{
if(line == "" || line[0] == '#')
continue;
STOP_WORD_SET.insert(line);
}
fin.close();
}
std::vector<zone> create_zonal_structures(const std::vector<std::string>& DATASET,const std::unordered_set<std::string>& STOP_WORD_SET,bool & clustering)
{
std::ofstream myfile;
myfile.open("RESULTS/Doc-names-index-MAP.txt");
myfile << " Doc names-index mapping " << std::endl;
myfile << " Total number of docs- " << DATASET.size() << std::endl << std::endl;
for(int i=0;i<DATASET.size();i++)
myfile << DATASET[i] << " indexed as " << i << std::endl;
myfile.close();
auto start = std::chrono::high_resolution_clock::now();
zone title_zone{"TITLE",true,true};
zone publication_zone{"PUBLICATION",false,false};
zone author_zone{"AUTHOR",false,false};
zone content_zone{"CONTENT",true,true};
std::vector<zone> zonal_structures;
for(int i = 0; i< DATASET.size();i++)
{
std::vector<std::vector<std::string>> tokens_all = Tokenizer(DATASET[i]);
title_zone.fill_mat(tokens_all[0],i,STOP_WORD_SET);
publication_zone.fill_mat(tokens_all[1],i,STOP_WORD_SET);
author_zone.fill_mat(tokens_all[2],i,STOP_WORD_SET);
content_zone.fill_mat(tokens_all[3],i,STOP_WORD_SET);
}
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::seconds>(stop - start);
std::cout << " Time taken to parse all docs and create zonal structures is:" << (double)duration.count()/(60);
std::cout << " minutes " << std::endl;
std::cout << "\n All docs parsed \n" << std::endl;
std::cout << "Total number of docs in dataset: " << DATASET.size() << std::endl;
std::cout << "Vocabulary size for zone- TITLE : " << title_zone.vocabulary.size() << std::endl;
std::cout << "Vocabulary size for zone- PUBLICATION : " << publication_zone.vocabulary.size() << std::endl;
std::cout << "Vocabulary size for zone- AUTHOR : " << author_zone.vocabulary.size() << std::endl;
std::cout << "Vocabulary size for zone- CONTENT : " << content_zone.vocabulary.size() << std::endl;
title_zone.Print_zonal_structure();
publication_zone.Print_zonal_structure();
author_zone.Print_zonal_structure();
content_zone.Print_zonal_structure();
title_zone.Basic_Initialization();
publication_zone.Basic_Initialization();
author_zone.Basic_Initialization();
content_zone.Basic_Initialization();
int cluster_yes;
std::cout << "Do you want to go for clustering:??? If yes, enter 1: ";
std::cin >> cluster_yes;
if(cluster_yes == 1)
{
clustering = true;
title_zone.Clustering_docs(DATASET);
publication_zone.Clustering_docs(DATASET);
author_zone.Clustering_docs(DATASET);
}
else
{
clustering = false;
}
zonal_structures.push_back(title_zone); //pointers copy!!! Careful...
zonal_structures.push_back(publication_zone);
zonal_structures.push_back(author_zone);
zonal_structures.push_back(content_zone);
return zonal_structures;
}
__global__ void Sum_Scores_kernel(float* result, float* arr0, float* arr1,
float* arr2, float* arr3, int len)
{
int gid = threadIdx.x + blockIdx.x*blockDim.x;
if(gid < len)
{
result[gid] = arr0[gid] + arr1[gid] + arr2[gid] + arr3[gid];
}
}
void Retrieve(const std::vector<std::string>& DATASET, std::vector<zone>& zonal_structures, const std::vector<std::string>& query,int k, bool cluster_yes,const std::string & fname )
{
int cluster_based_retrieval = 0;
if(cluster_yes == true){
std::cout << "\nDo you want to go for cluster based retrieval ?? If yes, enter 1: ";
std::cin >> cluster_based_retrieval;
}
std::ofstream myfile;
myfile.open(fname);
std::vector<std::pair<float,int>> total(DATASET.size());
if(cluster_based_retrieval == 1)
myfile << " Cluster based retrieval \n" << std::endl;
else
myfile << "Exact top k Retrieval\n" << std::endl;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float*> zone_scores_gpu_array_address(zonal_structures.size());
for(int i = 0; i< zonal_structures.size();i++)
{
std::string query_for_zone = query[i];
if(cluster_based_retrieval == 1 && zonal_structures[i].zone_name != std::string("CONTENT"))
{
zone_scores_gpu_array_address[i] = zonal_structures[i].query_handler_cluster_based(query_for_zone);
}
else
{
zone_scores_gpu_array_address[i] = zonal_structures[i].query_handler_exact(query_for_zone);
}
myfile << " zone : " << zonal_structures[i].zone_name << ", free text query is: " << query_for_zone << std::endl;
}
float* result;
hipMalloc((void**)&result, sizeof(float)*DATASET.size());
dim3 block(THREADS_PER_BLOCK);
dim3 grid(ceil((double)DATASET.size()/(double)THREADS_PER_BLOCK));
hipLaunchKernelGGL(( Sum_Scores_kernel), dim3(grid), dim3(block) , 0, 0, result, zone_scores_gpu_array_address[0], zone_scores_gpu_array_address[1],
zone_scores_gpu_array_address[2], zone_scores_gpu_array_address[3] , DATASET.size());
std::vector<float> temp(DATASET.size(),0);
hipMemcpy(&temp[0], result, sizeof(float)*DATASET.size(), hipMemcpyDeviceToHost);
for(int k = 0; k < DATASET.size();k++)
{
total[k].first = temp[k]; //Note: Equally weighting all zones....
total[k].second = k;
}
std::sort(total.rbegin(),total.rend());
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
for(int i=0; i< zonal_structures.size();i++)
hipFree(zone_scores_gpu_array_address[i]);
hipFree(result);
myfile << "Top " << k << " docs are :" << std::endl;
for(int i = 0 ; i < k ; i++)
{
std::cout << "doc name: " << DATASET[total[i].second] << " score: " << total[i].first << " out of 4.00 " << std::endl;
myfile << "doc name: " << DATASET[total[i].second] << " score: " << total[i].first << " out of 4.00 " << std::endl;
}
std::cout << " The retrieval time is :" << (double)duration.count();
std::cout << " milliseconds " << std::endl;
myfile << " The retrieval time is :" << (double)duration.count();
myfile << " milliseconds " << std::endl;
myfile.close();
}
| 9e453f8ece639b3fcf78fd1307d3247acb564930.cu | #include<iostream>
#include<string>
#include<vector>
#include "dirent.h"
#include<cstring>
#include<unordered_set>
#include<fstream>
#include<stdlib.h>
#include<regex>
#include <unordered_map>
#include <unordered_set>
#include"porter2_stemmer.h"
#include <utility>
#include <omp.h>
#include<chrono>
#include <iomanip>
#include"myheader.h"
std::vector<std::vector<std::string>> Tokenizer(const std::string& doc_name);
void Fill_dataset(std::vector<std::string>& DATASET)
{
struct dirent *de; // Pointer for directory entry
// opendir() returns a pointer of DIR type.
DIR *dr = opendir("../news_docs");
if (dr == NULL) // opendir returns NULL if couldn't open directory
{
printf("Could not open news_docs directory" );
exit(0);
}
// Refer http://pubs.opengroup.org/onlinepubs/7990989775/xsh/readdir.html
// for readdir()
while ((de = readdir(dr)) != NULL)
{
if(strcmp(de->d_name,".") == 0 || strcmp(de->d_name,"..") == 0 )
continue;
DATASET.push_back(std::string("../news_docs/") + std::string(de->d_name));
}
closedir(dr);
}
void Fill_StopWordSet(std::unordered_set<std::string>& STOP_WORD_SET)
{
std::ifstream fin;
fin.open("../STOP_WORDS_LIST.txt");
std::string line;
while(std::getline(fin,line)) //read the file line by line...
{
if(line == "" || line[0] == '#')
continue;
STOP_WORD_SET.insert(line);
}
fin.close();
}
std::vector<zone> create_zonal_structures(const std::vector<std::string>& DATASET,const std::unordered_set<std::string>& STOP_WORD_SET,bool & clustering)
{
std::ofstream myfile;
myfile.open("RESULTS/Doc-names-index-MAP.txt");
myfile << " Doc names-index mapping " << std::endl;
myfile << " Total number of docs- " << DATASET.size() << std::endl << std::endl;
for(int i=0;i<DATASET.size();i++)
myfile << DATASET[i] << " indexed as " << i << std::endl;
myfile.close();
auto start = std::chrono::high_resolution_clock::now();
zone title_zone{"TITLE",true,true};
zone publication_zone{"PUBLICATION",false,false};
zone author_zone{"AUTHOR",false,false};
zone content_zone{"CONTENT",true,true};
std::vector<zone> zonal_structures;
for(int i = 0; i< DATASET.size();i++)
{
std::vector<std::vector<std::string>> tokens_all = Tokenizer(DATASET[i]);
title_zone.fill_mat(tokens_all[0],i,STOP_WORD_SET);
publication_zone.fill_mat(tokens_all[1],i,STOP_WORD_SET);
author_zone.fill_mat(tokens_all[2],i,STOP_WORD_SET);
content_zone.fill_mat(tokens_all[3],i,STOP_WORD_SET);
}
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::seconds>(stop - start);
std::cout << " Time taken to parse all docs and create zonal structures is:" << (double)duration.count()/(60);
std::cout << " minutes " << std::endl;
std::cout << "\n All docs parsed \n" << std::endl;
std::cout << "Total number of docs in dataset: " << DATASET.size() << std::endl;
std::cout << "Vocabulary size for zone- TITLE : " << title_zone.vocabulary.size() << std::endl;
std::cout << "Vocabulary size for zone- PUBLICATION : " << publication_zone.vocabulary.size() << std::endl;
std::cout << "Vocabulary size for zone- AUTHOR : " << author_zone.vocabulary.size() << std::endl;
std::cout << "Vocabulary size for zone- CONTENT : " << content_zone.vocabulary.size() << std::endl;
title_zone.Print_zonal_structure();
publication_zone.Print_zonal_structure();
author_zone.Print_zonal_structure();
content_zone.Print_zonal_structure();
title_zone.Basic_Initialization();
publication_zone.Basic_Initialization();
author_zone.Basic_Initialization();
content_zone.Basic_Initialization();
int cluster_yes;
std::cout << "Do you want to go for clustering:??? If yes, enter 1: ";
std::cin >> cluster_yes;
if(cluster_yes == 1)
{
clustering = true;
title_zone.Clustering_docs(DATASET);
publication_zone.Clustering_docs(DATASET);
author_zone.Clustering_docs(DATASET);
}
else
{
clustering = false;
}
zonal_structures.push_back(title_zone); //pointers copy!!! Careful...
zonal_structures.push_back(publication_zone);
zonal_structures.push_back(author_zone);
zonal_structures.push_back(content_zone);
return zonal_structures;
}
__global__ void Sum_Scores_kernel(float* result, float* arr0, float* arr1,
float* arr2, float* arr3, int len)
{
int gid = threadIdx.x + blockIdx.x*blockDim.x;
if(gid < len)
{
result[gid] = arr0[gid] + arr1[gid] + arr2[gid] + arr3[gid];
}
}
void Retrieve(const std::vector<std::string>& DATASET, std::vector<zone>& zonal_structures, const std::vector<std::string>& query,int k, bool cluster_yes,const std::string & fname )
{
int cluster_based_retrieval = 0;
if(cluster_yes == true){
std::cout << "\nDo you want to go for cluster based retrieval ?? If yes, enter 1: ";
std::cin >> cluster_based_retrieval;
}
std::ofstream myfile;
myfile.open(fname);
std::vector<std::pair<float,int>> total(DATASET.size());
if(cluster_based_retrieval == 1)
myfile << " Cluster based retrieval \n" << std::endl;
else
myfile << "Exact top k Retrieval\n" << std::endl;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float*> zone_scores_gpu_array_address(zonal_structures.size());
for(int i = 0; i< zonal_structures.size();i++)
{
std::string query_for_zone = query[i];
if(cluster_based_retrieval == 1 && zonal_structures[i].zone_name != std::string("CONTENT"))
{
zone_scores_gpu_array_address[i] = zonal_structures[i].query_handler_cluster_based(query_for_zone);
}
else
{
zone_scores_gpu_array_address[i] = zonal_structures[i].query_handler_exact(query_for_zone);
}
myfile << " zone : " << zonal_structures[i].zone_name << ", free text query is: " << query_for_zone << std::endl;
}
float* result;
cudaMalloc((void**)&result, sizeof(float)*DATASET.size());
dim3 block(THREADS_PER_BLOCK);
dim3 grid(ceil((double)DATASET.size()/(double)THREADS_PER_BLOCK));
Sum_Scores_kernel<<< grid, block >>>(result, zone_scores_gpu_array_address[0], zone_scores_gpu_array_address[1],
zone_scores_gpu_array_address[2], zone_scores_gpu_array_address[3] , DATASET.size());
std::vector<float> temp(DATASET.size(),0);
cudaMemcpy(&temp[0], result, sizeof(float)*DATASET.size(), cudaMemcpyDeviceToHost);
for(int k = 0; k < DATASET.size();k++)
{
total[k].first = temp[k]; //Note: Equally weighting all zones....
total[k].second = k;
}
std::sort(total.rbegin(),total.rend());
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
for(int i=0; i< zonal_structures.size();i++)
cudaFree(zone_scores_gpu_array_address[i]);
cudaFree(result);
myfile << "Top " << k << " docs are :" << std::endl;
for(int i = 0 ; i < k ; i++)
{
std::cout << "doc name: " << DATASET[total[i].second] << " score: " << total[i].first << " out of 4.00 " << std::endl;
myfile << "doc name: " << DATASET[total[i].second] << " score: " << total[i].first << " out of 4.00 " << std::endl;
}
std::cout << " The retrieval time is :" << (double)duration.count();
std::cout << " milliseconds " << std::endl;
myfile << " The retrieval time is :" << (double)duration.count();
myfile << " milliseconds " << std::endl;
myfile.close();
}
|
f3b0f0751848af24a9596bd44d79fa43abfeaecc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
// Use the torch for GPU memory management. Thrust resize gives segfault during
// debugging -g #include <torch/extension.h>
#include "convolution_hip.cuh"
#include "../../utils.h"
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
namespace kaolin {
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
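// Rows of A are gathered through in_map and the partial products are scattered
// (via atomicAdd) into the rows of C selected by out_map.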
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul(const Dtype *A, const int wA, const int hA,
const Dtype *B, const int wB, const int hB, Dtype *C,
const Itype *in_map, const Itype *out_map) {
// Use in_feat as A and kernel as B
// Block index
const int bx = blockIdx.x;
const int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Coordinate. x is for rows, y is for columns.
const int x = BLOCK_SIZE * bx + tx;
const int y = BLOCK_SIZE * by + ty;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Dtype Csub = 0;
const Itype in_row = y < hA ? in_map[y] : 0;
const Itype out_row = y < hA ? out_map[y] : 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int s = 0; s < wA; s += BLOCK_SIZE) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * in_row + s + tx] : 0;
Bs[ty][tx] = ((s + ty) < hB && x < wB) ? B[wB * (s + ty) + x] : 0;
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
if (y < hA && x < wB)
atomicAdd(&C[wB * out_row + x], Csub);
// C[wB * out_row + x] += Csub;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B^T, E = D^T * A
* wA is A's width and wB is B's width
*
* +---+
* |B^T|
* +-------+
* | | |
* | A | C |
* | | |
* | | |
* +------------------+
* | D^T | E |
* +----------+---+
*
*/
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul2(const Dtype *A, const int wA, const int hA,
const Dtype *B, const int wB, const int hB,
const Dtype *D, const int wD, const int hD, Dtype *C,
Dtype *E, const Itype *in_map, const Itype *out_map) {
// Use grad_out_feat as A, transposed kernel weight as B, and in_feat as D
// Block index
const int bx = blockIdx.x;
const int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Coordinate. y is for rows, x is for columns.
const int x = BLOCK_SIZE * bx + tx;
const int y = BLOCK_SIZE * by + ty;
const Itype in_row = y < hA ? in_map[y] : 0;
const Itype out_row = y < hA ? out_map[y] : 0;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Dtype Csub = 0;
Dtype Esub = 0;
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Dtype BTs[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Ds used to
// store the sub-matrix of D
__shared__ Dtype DTs[BLOCK_SIZE][BLOCK_SIZE];
// For Ds = D^T[...:..., ...:...], use the transposed grid dimension for A
DTs[ty][tx] = (x < wD && y < hD) ? D[wD * in_row + x] : 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int s = 0; s < wA; s += BLOCK_SIZE) {
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * out_row + s + tx] : 0;
// Transposed kernel
BTs[ty][tx] = ((s + ty) < wB && x < hB) ? B[wB * x + s + ty] : 0;
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * BTs[k][tx];
}
// For Esub, reset to 0
Esub = 0;
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Esub += DTs[k][ty] * As[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
    // For the E matrix which requires accumulation of multiple blocks, use
    // atomic addition. This can be replaced with a more sophisticated reduction
// algorithm.
if ((bx * BLOCK_SIZE + ty) < wD && (s + tx) < wA)
atomicAdd(&E[wA * (bx * BLOCK_SIZE + ty) + (s + tx)], Esub);
}
// Write the block sub-matrix to device memory;
// each thread writes one element
if (y < hA && x < hB)
atomicAdd(&C[hB * in_row + x], Csub);
}
namespace minkowski {
template <typename Dtype, typename Itype>
void ConvolutionForwardKernelGPU(const Dtype *d_in_feat, int in_nchannel,
Dtype *d_out_feat, int out_nchannel,
const Dtype *d_kernel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream) {
AT_CUDA_CHECK(hipDeviceSynchronize());
int n_active_in_volume, shared_mem_size = -1;
// Define the shared memory size
if ((in_nchannel > 16 && out_nchannel > 16 &&
in_nchannel * out_nchannel >= 512) ||
(in_nchannel > 24 && out_nchannel > 24))
shared_mem_size = 32;
else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
shared_mem_size = 24;
else if ((in_nchannel > 8 && out_nchannel > 8) ||
(in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
shared_mem_size = 16;
else
shared_mem_size = 8;
dim3 threads(shared_mem_size, shared_mem_size);
// Iterate through each spatial kernel and get indices for in_map and out_map
for (int k = 0; k < in_maps.size(); k++) {
n_active_in_volume = in_maps[k].size();
if (n_active_in_volume == 0)
continue;
int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
int step = (n_active_in_volume + num_div - 1) / num_div;
for (int s = 0; s < num_div; s++) {
int offset = step * s;
int remainder = n_active_in_volume - step * s;
int curr_num_active = remainder < step ? remainder : step;
dim3 grid((out_nchannel + threads.x - 1) / threads.x,
(curr_num_active + threads.y - 1) / threads.y);
switch (shared_mem_size) {
case 32:
matmul<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 24:
matmul<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 16:
matmul<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 8:
matmul<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
}
}
AT_CUDA_CHECK(hipGetLastError());
}
AT_CUDA_CHECK(hipDeviceSynchronize());
}
template void ConvolutionForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nchannel, float *d_out_feat,
int out_nchannel, const float *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
template void ConvolutionForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nchannel, double *d_out_feat,
int out_nchannel, const double *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
template <typename Dtype, typename Itype>
void ConvolutionBackwardKernelGPU(const Dtype *d_in_feat, Dtype *d_grad_in_feat,
int in_nchannel, const Dtype *d_grad_out_feat,
int out_nchannel, const Dtype *d_kernel,
Dtype *d_grad_kernel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream) {
AT_CUDA_CHECK(hipDeviceSynchronize());
int n_active_in_volume, shared_mem_size = -1;
// Define the shared memory size
if ((in_nchannel > 16 && out_nchannel > 16 &&
in_nchannel * out_nchannel >= 512) ||
(in_nchannel % 32 == 0 && out_nchannel % 32 == 0))
shared_mem_size = 32;
else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
shared_mem_size = 24;
else if ((in_nchannel > 8 && out_nchannel > 8) ||
(in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
shared_mem_size = 16;
else
shared_mem_size = 8;
dim3 threads(shared_mem_size, shared_mem_size);
for (int k = 0; k < in_maps.size(); k++) {
n_active_in_volume = in_maps[k].size();
if (n_active_in_volume == 0)
continue;
int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
int step = (n_active_in_volume + num_div - 1) / num_div;
for (int s = 0; s < num_div; s++) {
int offset = step * s;
int remainder = n_active_in_volume - step * s;
int curr_num_active = remainder < step ? remainder : step;
dim3 grid((in_nchannel + threads.x - 1) / threads.x,
(curr_num_active + threads.y - 1) / threads.y);
switch (shared_mem_size) {
case 32:
matmul2<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 24:
matmul2<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 16:
matmul2<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 8:
matmul2<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
}
}
AT_CUDA_CHECK(hipGetLastError());
}
AT_CUDA_CHECK(hipDeviceSynchronize());
}
template void ConvolutionBackwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_grad_in_feat, int in_nchannel,
const float *d_grad_out_feat, int out_nchannel, const float *d_kernel,
float *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
template void ConvolutionBackwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_grad_in_feat, int in_nchannel,
const double *d_grad_out_feat, int out_nchannel, const double *d_kernel,
double *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
} // end namespace minkowski
} // namespace kaolin
| f3b0f0751848af24a9596bd44d79fa43abfeaecc.cu | // Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
// Use the torch for GPU memory management. Thrust resize gives segfault during
// debugging -g #include <torch/extension.h>
#include "convolution.cuh"
#include "../../utils.h"
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
namespace kaolin {
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
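// Rows of A are gathered through in_map and the partial products are scattered
// (via atomicAdd) into the rows of C selected by out_map.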
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul(const Dtype *A, const int wA, const int hA,
const Dtype *B, const int wB, const int hB, Dtype *C,
const Itype *in_map, const Itype *out_map) {
// Use in_feat as A and kernel as B
// Block index
const int bx = blockIdx.x;
const int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Coordinate. x is for rows, y is for columns.
const int x = BLOCK_SIZE * bx + tx;
const int y = BLOCK_SIZE * by + ty;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Dtype Csub = 0;
const Itype in_row = y < hA ? in_map[y] : 0;
const Itype out_row = y < hA ? out_map[y] : 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int s = 0; s < wA; s += BLOCK_SIZE) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * in_row + s + tx] : 0;
Bs[ty][tx] = ((s + ty) < hB && x < wB) ? B[wB * (s + ty) + x] : 0;
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
if (y < hA && x < wB)
atomicAdd(&C[wB * out_row + x], Csub);
// C[wB * out_row + x] += Csub;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B^T, E = D^T * A
* wA is A's width and wB is B's width
*
* +---+
* |B^T|
* +-------+
* | | |
* | A | C |
* | | |
* | | |
* +------------------+
* | D^T | E |
* +----------+---+
*
*/
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul2(const Dtype *A, const int wA, const int hA,
const Dtype *B, const int wB, const int hB,
const Dtype *D, const int wD, const int hD, Dtype *C,
Dtype *E, const Itype *in_map, const Itype *out_map) {
// Use grad_out_feat as A, transposed kernel weight as B, and in_feat as D
// Block index
const int bx = blockIdx.x;
const int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Coordinate. y is for rows, x is for columns.
const int x = BLOCK_SIZE * bx + tx;
const int y = BLOCK_SIZE * by + ty;
const Itype in_row = y < hA ? in_map[y] : 0;
const Itype out_row = y < hA ? out_map[y] : 0;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Dtype Csub = 0;
Dtype Esub = 0;
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Dtype BTs[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Ds used to
// store the sub-matrix of D
__shared__ Dtype DTs[BLOCK_SIZE][BLOCK_SIZE];
// For Ds = D^T[...:..., ...:...], use the transposed grid dimension for A
DTs[ty][tx] = (x < wD && y < hD) ? D[wD * in_row + x] : 0;
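  // DTs does not depend on the loop variable s below, so it is loaded only
  // once per thread block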
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int s = 0; s < wA; s += BLOCK_SIZE) {
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * out_row + s + tx] : 0;
// Transposed kernel
BTs[ty][tx] = ((s + ty) < wB && x < hB) ? B[wB * x + s + ty] : 0;
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * BTs[k][tx];
}
// For Esub, reset to 0
Esub = 0;
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Esub += DTs[k][ty] * As[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
    // For the E matrix, which requires accumulation over multiple blocks, use
    // atomic addition. This can be replaced with a more sophisticated reduction
// algorithm.
if ((bx * BLOCK_SIZE + ty) < wD && (s + tx) < wA)
atomicAdd(&E[wA * (bx * BLOCK_SIZE + ty) + (s + tx)], Esub);
}
// Write the block sub-matrix to device memory;
// each thread writes one element
if (y < hA && x < hB)
atomicAdd(&C[hB * in_row + x], Csub);
}
namespace minkowski {
template <typename Dtype, typename Itype>
void ConvolutionForwardKernelGPU(const Dtype *d_in_feat, int in_nchannel,
Dtype *d_out_feat, int out_nchannel,
const Dtype *d_kernel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream) {
AT_CUDA_CHECK(cudaDeviceSynchronize());
int n_active_in_volume, shared_mem_size = -1;
// Define the shared memory size
if ((in_nchannel > 16 && out_nchannel > 16 &&
in_nchannel * out_nchannel >= 512) ||
(in_nchannel > 24 && out_nchannel > 24))
shared_mem_size = 32;
else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
shared_mem_size = 24;
else if ((in_nchannel > 8 && out_nchannel > 8) ||
(in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
shared_mem_size = 16;
else
shared_mem_size = 8;
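  // shared_mem_size doubles as the tile edge: the thread block and the
  // shared-memory tiles inside matmul are shared_mem_size x shared_mem_size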
dim3 threads(shared_mem_size, shared_mem_size);
// Iterate through each spatial kernel and get indices for in_map and out_map
for (int k = 0; k < in_maps.size(); k++) {
n_active_in_volume = in_maps[k].size();
if (n_active_in_volume == 0)
continue;
int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
int step = (n_active_in_volume + num_div - 1) / num_div;
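    // Split the active rows into num_div chunks so that each kernel launch
    // needs at most MAX_GRID blocks along the row (y) dimension of the grid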
for (int s = 0; s < num_div; s++) {
int offset = step * s;
int remainder = n_active_in_volume - step * s;
int curr_num_active = remainder < step ? remainder : step;
dim3 grid((out_nchannel + threads.x - 1) / threads.x,
(curr_num_active + threads.y - 1) / threads.y);
switch (shared_mem_size) {
case 32:
matmul<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 24:
matmul<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 16:
matmul<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 8:
matmul<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
}
}
AT_CUDA_CHECK(cudaGetLastError());
}
AT_CUDA_CHECK(cudaDeviceSynchronize());
}
template void ConvolutionForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nchannel, float *d_out_feat,
int out_nchannel, const float *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template void ConvolutionForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nchannel, double *d_out_feat,
int out_nchannel, const double *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template <typename Dtype, typename Itype>
void ConvolutionBackwardKernelGPU(const Dtype *d_in_feat, Dtype *d_grad_in_feat,
int in_nchannel, const Dtype *d_grad_out_feat,
int out_nchannel, const Dtype *d_kernel,
Dtype *d_grad_kernel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream) {
AT_CUDA_CHECK(cudaDeviceSynchronize());
int n_active_in_volume, shared_mem_size = -1;
// Define the shared memory size
if ((in_nchannel > 16 && out_nchannel > 16 &&
in_nchannel * out_nchannel >= 512) ||
(in_nchannel % 32 == 0 && out_nchannel % 32 == 0))
shared_mem_size = 32;
else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
shared_mem_size = 24;
else if ((in_nchannel > 8 && out_nchannel > 8) ||
(in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
shared_mem_size = 16;
else
shared_mem_size = 8;
dim3 threads(shared_mem_size, shared_mem_size);
for (int k = 0; k < in_maps.size(); k++) {
n_active_in_volume = in_maps[k].size();
if (n_active_in_volume == 0)
continue;
int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
int step = (n_active_in_volume + num_div - 1) / num_div;
for (int s = 0; s < num_div; s++) {
int offset = step * s;
int remainder = n_active_in_volume - step * s;
int curr_num_active = remainder < step ? remainder : step;
dim3 grid((in_nchannel + threads.x - 1) / threads.x,
(curr_num_active + threads.y - 1) / threads.y);
switch (shared_mem_size) {
case 32:
matmul2<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 24:
matmul2<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 16:
matmul2<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 8:
matmul2<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
}
}
AT_CUDA_CHECK(cudaGetLastError());
}
AT_CUDA_CHECK(cudaDeviceSynchronize());
}
template void ConvolutionBackwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_grad_in_feat, int in_nchannel,
const float *d_grad_out_feat, int out_nchannel, const float *d_kernel,
float *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template void ConvolutionBackwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_grad_in_feat, int in_nchannel,
const double *d_grad_out_feat, int out_nchannel, const double *d_kernel,
double *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
} // end namespace minkowski
} // namespace kaolin
|
7ecd26d2eeeb9f270a7dd2c1594d85ec50304c89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/geometry/ReinitRemainingSurfelMarker.h"
#include "core/warp_solver/solver_types.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
struct ReinitRemainingMarkerDevice {
enum {
window_halfsize = 2,
};
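	//A (2 * window_halfsize) x (2 * window_halfsize) pixel window around the
	//projected surfel is scanned when searching for a depth correspondence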
//The geometry model input
struct {
DeviceArrayView<float4> vertex_confid;
const float4* normal_radius;
const float4* color_time;
const ushort4* surfel_knn;
} live_geometry;
//The observation from camera
struct {
hipTextureObject_t vertex_map;
hipTextureObject_t normal_map;
hipTextureObject_t foreground_mask;
} camera_observation;
//The information on camera
mat34 world2camera;
Intrinsic intrinsic;
__device__ __forceinline__ void processMarkingObservedOnly(unsigned* remaining_indicator) const {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx >= live_geometry.vertex_confid.Size()) return;
//Is this surfel fused? If so, must remain
const auto fused = remaining_indicator[idx];
if(fused > 0) return;
const float4 surfel_vertex_confid = live_geometry.vertex_confid[idx];
const float4 surfel_normal_radius = live_geometry.normal_radius[idx];
//const float4 surfel_color_time = live_geometry.color_time[idx];
//Transfer to camera space
const float3 vertex = world2camera.rot * surfel_vertex_confid + world2camera.trans;
const float3 normal = world2camera.rot * surfel_normal_radius;
//Project to camera image
const int x = __float2int_rn(((vertex.x / (vertex.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x);
const int y = __float2int_rn(((vertex.y / (vertex.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y);
//Deal with the case where (x, y) is out of the range of the image
//The flag value
bool has_corresponded = false;
		//Does this surfel have a correspondence on the depth image?
for(auto map_y = y - window_halfsize; map_y < y + window_halfsize; map_y++) {
for(auto map_x = x - window_halfsize; map_x < x + window_halfsize; map_x++) {
//Load the depth image
const float4 depth_vertex = tex2D<float4>(camera_observation.vertex_map, map_x, map_y);
const float4 depth_normal = tex2D<float4>(camera_observation.normal_map, map_x, map_y);
//Compute various values
const float normal_dot = dotxyz(normal, depth_normal);
				//Check for correspondence
if(squared_distance(vertex, depth_vertex) < 0.003f * 0.003f && normal_dot >= 0.8f)
has_corresponded = true;
}
} // windows search on depth image
//Check the foreground
auto foregound = tex2D<unsigned char>(camera_observation.foreground_mask, x, y);
unsigned remain = 0;
if(has_corresponded && (foregound > 0))
remain = 1;
//Write to output
remaining_indicator[idx] = remain;
}
__device__ __forceinline__ void processMarkingNodeError(
const NodeAlignmentError& node_error,
float threshold,
unsigned* remaining_indicator
) const {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx >= live_geometry.vertex_confid.Size()) return;
//Is this surfel fused? If so, must remain
const auto fused = remaining_indicator[idx];
if(fused > 0) return;
const float4 surfel_vertex_confid = live_geometry.vertex_confid[idx];
const float4 surfel_normal_radius = live_geometry.normal_radius[idx];
//const float4 surfel_color_time = live_geometry.color_time[idx];
//Transfer to camera space
const float3 vertex = world2camera.rot * surfel_vertex_confid + world2camera.trans;
//const float3 normal = world2camera.rot * surfel_normal_radius;
//Project to camera image and check foreground
const int x = __float2int_rn(((vertex.x / (vertex.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x);
const int y = __float2int_rn(((vertex.y / (vertex.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y);
const auto foregound = tex2D<unsigned char>(camera_observation.foreground_mask, x, y);
//Somehow optimistic
unsigned remain = foregound > 0 ? 1 : 0;
//Check the error
const ushort4 knn_nodes = live_geometry.surfel_knn[idx];
const unsigned short* knn_nodes_flat = (const unsigned short*)(&knn_nodes);
for(auto i = 0; i < 4; i++) {
const auto node = knn_nodes_flat[i];
const float accumlate_error = node_error.node_accumlated_error[node];
const float accumlate_weight = node_error.node_accumlate_weight[node];
if(accumlate_weight * threshold > accumlate_error)
remain = 0;
}
//Write to output
remaining_indicator[idx] = remain;
}
};
__global__ void markReinitRemainingSurfelObservedOnlyKernel(
const ReinitRemainingMarkerDevice marker,
unsigned* remaining_indicator
) {
marker.processMarkingObservedOnly(remaining_indicator);
}
__global__ void markReinitRemainingSurfelNodeErrorKernel(
const ReinitRemainingMarkerDevice marker,
const NodeAlignmentError node_error,
float threshold,
unsigned* remaining_indicator
) {
marker.processMarkingNodeError(node_error, threshold, remaining_indicator);
}
} // device
} // surfelwarp
void surfelwarp::ReinitRemainingSurfelMarker::prepareMarkerArguments(void * raw_marker) {
device::ReinitRemainingMarkerDevice& marker = *((device::ReinitRemainingMarkerDevice*)raw_marker);
marker.live_geometry.vertex_confid = m_surfel_geometry.live_vertex_confid.ArrayView();
marker.live_geometry.normal_radius = m_surfel_geometry.live_normal_radius.RawPtr();
marker.live_geometry.color_time = m_surfel_geometry.color_time.RawPtr();
marker.live_geometry.surfel_knn = m_surfel_geometry.surfel_knn.RawPtr();
marker.camera_observation.vertex_map = m_observation.vertex_config_map;
marker.camera_observation.normal_map = m_observation.normal_radius_map;
marker.camera_observation.foreground_mask = m_observation.foreground_mask;
marker.world2camera = m_world2camera;
marker.intrinsic = m_intrinsic;
}
void surfelwarp::ReinitRemainingSurfelMarker::MarkRemainingSurfelObservedOnly(hipStream_t stream) {
//Construct the argument
device::ReinitRemainingMarkerDevice marker;
prepareMarkerArguments((void*)&marker);
//Invoke the kernel
dim3 blk(256);
dim3 grid(divUp(m_remaining_surfel_indicator.Size(), blk.x));
hipLaunchKernelGGL(( device::markReinitRemainingSurfelObservedOnlyKernel), dim3(grid), dim3(blk), 0, stream,
marker,
m_remaining_surfel_indicator.RawPtr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
}
void surfelwarp::ReinitRemainingSurfelMarker::MarkRemainingSurfelNodeError(
const NodeAlignmentError & node_error,
float threshold,
hipStream_t stream
) {
//Construct the argument
device::ReinitRemainingMarkerDevice marker;
prepareMarkerArguments((void*)&marker);
//Invoke the kernel
dim3 blk(256);
dim3 grid(divUp(m_remaining_surfel_indicator.Size(), blk.x));
hipLaunchKernelGGL(( device::markReinitRemainingSurfelNodeErrorKernel), dim3(grid), dim3(blk), 0, stream,
marker,
node_error,
threshold,
m_remaining_surfel_indicator.RawPtr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
} | 7ecd26d2eeeb9f270a7dd2c1594d85ec50304c89.cu | #include "core/geometry/ReinitRemainingSurfelMarker.h"
#include "core/warp_solver/solver_types.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
struct ReinitRemainingMarkerDevice {
enum {
window_halfsize = 2,
};
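	//A (2 * window_halfsize) x (2 * window_halfsize) pixel window around the
	//projected surfel is scanned when searching for a depth correspondence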
//The geometry model input
struct {
DeviceArrayView<float4> vertex_confid;
const float4* normal_radius;
const float4* color_time;
const ushort4* surfel_knn;
} live_geometry;
//The observation from camera
struct {
cudaTextureObject_t vertex_map;
cudaTextureObject_t normal_map;
cudaTextureObject_t foreground_mask;
} camera_observation;
//The information on camera
mat34 world2camera;
Intrinsic intrinsic;
__device__ __forceinline__ void processMarkingObservedOnly(unsigned* remaining_indicator) const {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx >= live_geometry.vertex_confid.Size()) return;
//Is this surfel fused? If so, must remain
const auto fused = remaining_indicator[idx];
if(fused > 0) return;
const float4 surfel_vertex_confid = live_geometry.vertex_confid[idx];
const float4 surfel_normal_radius = live_geometry.normal_radius[idx];
//const float4 surfel_color_time = live_geometry.color_time[idx];
//Transfer to camera space
const float3 vertex = world2camera.rot * surfel_vertex_confid + world2camera.trans;
const float3 normal = world2camera.rot * surfel_normal_radius;
//Project to camera image
const int x = __float2int_rn(((vertex.x / (vertex.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x);
const int y = __float2int_rn(((vertex.y / (vertex.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y);
//Deal with the case where (x, y) is out of the range of the image
//The flag value
bool has_corresponded = false;
		//Does this surfel have a correspondence on the depth image?
for(auto map_y = y - window_halfsize; map_y < y + window_halfsize; map_y++) {
for(auto map_x = x - window_halfsize; map_x < x + window_halfsize; map_x++) {
//Load the depth image
const float4 depth_vertex = tex2D<float4>(camera_observation.vertex_map, map_x, map_y);
const float4 depth_normal = tex2D<float4>(camera_observation.normal_map, map_x, map_y);
//Compute various values
const float normal_dot = dotxyz(normal, depth_normal);
				//Check for correspondence
if(squared_distance(vertex, depth_vertex) < 0.003f * 0.003f && normal_dot >= 0.8f)
has_corresponded = true;
}
} // windows search on depth image
//Check the foreground
auto foregound = tex2D<unsigned char>(camera_observation.foreground_mask, x, y);
unsigned remain = 0;
if(has_corresponded && (foregound > 0))
remain = 1;
//Write to output
remaining_indicator[idx] = remain;
}
__device__ __forceinline__ void processMarkingNodeError(
const NodeAlignmentError& node_error,
float threshold,
unsigned* remaining_indicator
) const {
const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx >= live_geometry.vertex_confid.Size()) return;
//Is this surfel fused? If so, must remain
const auto fused = remaining_indicator[idx];
if(fused > 0) return;
const float4 surfel_vertex_confid = live_geometry.vertex_confid[idx];
const float4 surfel_normal_radius = live_geometry.normal_radius[idx];
//const float4 surfel_color_time = live_geometry.color_time[idx];
//Transfer to camera space
const float3 vertex = world2camera.rot * surfel_vertex_confid + world2camera.trans;
//const float3 normal = world2camera.rot * surfel_normal_radius;
//Project to camera image and check foreground
const int x = __float2int_rn(((vertex.x / (vertex.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x);
const int y = __float2int_rn(((vertex.y / (vertex.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y);
const auto foregound = tex2D<unsigned char>(camera_observation.foreground_mask, x, y);
//Somehow optimistic
unsigned remain = foregound > 0 ? 1 : 0;
//Check the error
const ushort4 knn_nodes = live_geometry.surfel_knn[idx];
const unsigned short* knn_nodes_flat = (const unsigned short*)(&knn_nodes);
for(auto i = 0; i < 4; i++) {
const auto node = knn_nodes_flat[i];
const float accumlate_error = node_error.node_accumlated_error[node];
const float accumlate_weight = node_error.node_accumlate_weight[node];
if(accumlate_weight * threshold > accumlate_error)
remain = 0;
}
//Write to output
remaining_indicator[idx] = remain;
}
};
__global__ void markReinitRemainingSurfelObservedOnlyKernel(
const ReinitRemainingMarkerDevice marker,
unsigned* remaining_indicator
) {
marker.processMarkingObservedOnly(remaining_indicator);
}
__global__ void markReinitRemainingSurfelNodeErrorKernel(
const ReinitRemainingMarkerDevice marker,
const NodeAlignmentError node_error,
float threshold,
unsigned* remaining_indicator
) {
marker.processMarkingNodeError(node_error, threshold, remaining_indicator);
}
} // device
} // surfelwarp
void surfelwarp::ReinitRemainingSurfelMarker::prepareMarkerArguments(void * raw_marker) {
device::ReinitRemainingMarkerDevice& marker = *((device::ReinitRemainingMarkerDevice*)raw_marker);
marker.live_geometry.vertex_confid = m_surfel_geometry.live_vertex_confid.ArrayView();
marker.live_geometry.normal_radius = m_surfel_geometry.live_normal_radius.RawPtr();
marker.live_geometry.color_time = m_surfel_geometry.color_time.RawPtr();
marker.live_geometry.surfel_knn = m_surfel_geometry.surfel_knn.RawPtr();
marker.camera_observation.vertex_map = m_observation.vertex_config_map;
marker.camera_observation.normal_map = m_observation.normal_radius_map;
marker.camera_observation.foreground_mask = m_observation.foreground_mask;
marker.world2camera = m_world2camera;
marker.intrinsic = m_intrinsic;
}
void surfelwarp::ReinitRemainingSurfelMarker::MarkRemainingSurfelObservedOnly(cudaStream_t stream) {
//Construct the argument
device::ReinitRemainingMarkerDevice marker;
prepareMarkerArguments((void*)&marker);
//Invoke the kernel
dim3 blk(256);
dim3 grid(divUp(m_remaining_surfel_indicator.Size(), blk.x));
device::markReinitRemainingSurfelObservedOnlyKernel<<<grid, blk, 0, stream>>>(
marker,
m_remaining_surfel_indicator.RawPtr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::ReinitRemainingSurfelMarker::MarkRemainingSurfelNodeError(
const NodeAlignmentError & node_error,
float threshold,
cudaStream_t stream
) {
//Construct the argument
device::ReinitRemainingMarkerDevice marker;
prepareMarkerArguments((void*)&marker);
//Invoke the kernel
dim3 blk(256);
dim3 grid(divUp(m_remaining_surfel_indicator.Size(), blk.x));
device::markReinitRemainingSurfelNodeErrorKernel<<<grid, blk, 0, stream>>>(
marker,
node_error,
threshold,
m_remaining_surfel_indicator.RawPtr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
} |
dd8d6aa8302add64130f1461b43a6195cbc6efa7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/layers/tps_c2t.hpp"
#include "caffe/util/benchmark.hpp"
//#include "caffe/layers/TPSInterpolate.hpp"
namespace caffe{
template <typename Dtype>
__global__ void set_value_to_constant(const int nthreads, int N, int src_size, const Dtype* src, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % src_size;
const int i = index / src_size;
int row_index = (src_size+3) * i + t;
dst[row_index] = src[index];
}
}
template <typename Dtype>
void CToParaLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
string prefix = "CToParaLayer::Forward_gpu::\t";
//std::cout << prefix << "Starting!" << std::endl;
//test_defined_count++;
//Forward_cpu(bottom, top);
const Dtype* ctl_points = bottom[0]->gpu_data();
Dtype* T = top[0]->mutable_gpu_data();
Dtype* Y = full_y.mutable_gpu_data();
caffe_gpu_set(top[0]->count(), (Dtype)0, T);
caffe_gpu_set(full_y.count(), (Dtype)0, Y);
int coff_num = top[0]->shape(1);
const int nthreads = N * (coff_num - 6);
set_value_to_constant<Dtype> << <CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS >> >(nthreads, N, (coff_num - 6)/2, ctl_points, Y);
const Dtype* inv_delta = inv_delta_c.gpu_data();
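	// Per sample i, the gemm below computes T_i = inv_delta_c * Y_i^T, i.e. it
	// recovers the (thin plate spline, per the tps_c2t naming) transformation
	// coefficients from the zero-padded control points using the precomputed
	// inverse of the TPS system matrix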
for(int i = 0; i < N; ++i) {
Dtype* curr_t = T + coff_num * i;
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, coff_num/2, 2, coff_num/2, (Dtype)1., inv_delta, Y + coff_num * i, (Dtype)0., curr_t);
}
//std::cout << prefix << "ending!" << std::endl;
//if (test_defined_count == 10000)
//{
// std::cout << "----------------ctl_points_test gpu-------------------" << std::endl;
// const Dtype* ctl_points_test = bottom[0]->cpu_data();
// for (int i = 0; i<bottom[0]->count(); ++i) {
// std::cout << ctl_points_test[i] << " ";
// }
// std::cout << std::endl;
// std::cout << "----------------Y gpu-------------------" << std::endl;
// const Dtype* dY_test = full_y.cpu_data();
// for (int i = 0; i<full_y.count(); ++i) {
// std::cout << dY_test[i] << " ";
// }
// std::cout << std::endl;
// std::cout << "----------------dT gpu-------------------" << std::endl;
// const Dtype* dT_test = top[0]->cpu_data();
// for (int i = 0; i<top[0]->count(); ++i) {
// std::cout << dT_test[i] << " ";
// }
// std::cout << std::endl;
// Dtype* break_ptr = 0;
// *break_ptr = 1;
//}
}
template <typename Dtype>
__global__ void CToParaBackwardGPU(const int nthreads, int N, int K1, int K2, const Dtype* inv_constant, const Dtype* dTop, Dtype* dBottom) {
CUDA_KERNEL_LOOP(index, nthreads) {
//const int dt_index = index % K1;
const int dinv_row = index % K2;
const int dinv_col = (index / K2) % K1;
const int i = index / (K1 * K2);
int t_index_x = (K2*2) * i + dinv_col;
int inv_index_x = K2 * dinv_row + dinv_col;
const Dtype value_x = dTop[t_index_x]* inv_constant[inv_index_x];
dBottom[index] = value_x;
int t_index_y = (K2 * 2) * i + dinv_col + K2;
int inv_index_y = K2 * dinv_row + dinv_col;
const Dtype value_y = dTop[t_index_y] * inv_constant[inv_index_y];
dBottom[index + K1 * K2] = value_y;
}
}
template <typename Dtype>
void CToParaLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
string prefix = "CToParaLayer::Backward_gpu::\t";
//std::cout << prefix << "Starting!" << std::endl;
//test_defined_count++;
//Backward_cpu(top, propagate_down, bottom);
const Dtype* dT = top[0]->gpu_diff();
const Dtype* inv_c = inv_delta_c.gpu_data();
Dtype* dC = bottom[0]->mutable_gpu_diff();
Dtype* dC_tmp_diff = dC_tmp.mutable_gpu_diff();
caffe_gpu_set(dC_tmp.count(), (Dtype)0., dC_tmp_diff);
caffe_gpu_set(bottom[0]->count(), (Dtype)0, dC);
int coff_num = top[0]->shape(1);
int c_num = bottom[0]->shape(1);
const int nthreads = N * (K) * (K+3);
CToParaBackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS >> >(nthreads, N, K, (K + 3), inv_c, dT, dC_tmp_diff);
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
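	// Multiplying dC_tmp (2K x (K+3)) by a column vector of ones sums each row
	// over the K+3 columns, producing the 2K x 1 control-point gradient dC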
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 2*K, 1, K + 3,
(Dtype)1., dC_tmp_diff, all_ones_2_data, (Dtype)0., dC);
//std::cout << prefix << "ending!" << std::endl;
//if (test_defined_count == 1000)
//{
// std::cout << "----------------dC gpu-------------------" << std::endl;
// const Dtype* dC_test = bottom[0]->cpu_diff();
// for (int i = 0; i < bottom[0]->count(); ++i) {
// std::cout << dC_test[i] << " ";
// }
// std::cout << std::endl;
// Dtype* break_ptr = 0;
// *break_ptr = 1;
//}
}
INSTANTIATE_LAYER_GPU_FUNCS(CToParaLayer);
}
| dd8d6aa8302add64130f1461b43a6195cbc6efa7.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/layers/tps_c2t.hpp"
#include "caffe/util/benchmark.hpp"
//#include "caffe/layers/TPSInterpolate.hpp"
namespace caffe{
template <typename Dtype>
__global__ void set_value_to_constant(const int nthreads, int N, int src_size, const Dtype* src, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % src_size;
const int i = index / src_size;
int row_index = (src_size+3) * i + t;
dst[row_index] = src[index];
}
}
template <typename Dtype>
void CToParaLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
string prefix = "CToParaLayer::Forward_gpu::\t";
//std::cout << prefix << "Starting!" << std::endl;
//test_defined_count++;
//Forward_cpu(bottom, top);
const Dtype* ctl_points = bottom[0]->gpu_data();
Dtype* T = top[0]->mutable_gpu_data();
Dtype* Y = full_y.mutable_gpu_data();
caffe_gpu_set(top[0]->count(), (Dtype)0, T);
caffe_gpu_set(full_y.count(), (Dtype)0, Y);
int coff_num = top[0]->shape(1);
const int nthreads = N * (coff_num - 6);
set_value_to_constant<Dtype> << <CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS >> >(nthreads, N, (coff_num - 6)/2, ctl_points, Y);
const Dtype* inv_delta = inv_delta_c.gpu_data();
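	// Per sample i, the gemm below computes T_i = inv_delta_c * Y_i^T, i.e. it
	// recovers the (thin plate spline, per the tps_c2t naming) transformation
	// coefficients from the zero-padded control points using the precomputed
	// inverse of the TPS system matrix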
for(int i = 0; i < N; ++i) {
Dtype* curr_t = T + coff_num * i;
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, coff_num/2, 2, coff_num/2, (Dtype)1., inv_delta, Y + coff_num * i, (Dtype)0., curr_t);
}
//std::cout << prefix << "ending!" << std::endl;
//if (test_defined_count == 10000)
//{
// std::cout << "----------------ctl_points_test gpu-------------------" << std::endl;
// const Dtype* ctl_points_test = bottom[0]->cpu_data();
// for (int i = 0; i<bottom[0]->count(); ++i) {
// std::cout << ctl_points_test[i] << " ";
// }
// std::cout << std::endl;
// std::cout << "----------------Y gpu-------------------" << std::endl;
// const Dtype* dY_test = full_y.cpu_data();
// for (int i = 0; i<full_y.count(); ++i) {
// std::cout << dY_test[i] << " ";
// }
// std::cout << std::endl;
// std::cout << "----------------dT gpu-------------------" << std::endl;
// const Dtype* dT_test = top[0]->cpu_data();
// for (int i = 0; i<top[0]->count(); ++i) {
// std::cout << dT_test[i] << " ";
// }
// std::cout << std::endl;
// Dtype* break_ptr = 0;
// *break_ptr = 1;
//}
}
template <typename Dtype>
__global__ void CToParaBackwardGPU(const int nthreads, int N, int K1, int K2, const Dtype* inv_constant, const Dtype* dTop, Dtype* dBottom) {
CUDA_KERNEL_LOOP(index, nthreads) {
//const int dt_index = index % K1;
const int dinv_row = index % K2;
const int dinv_col = (index / K2) % K1;
const int i = index / (K1 * K2);
int t_index_x = (K2*2) * i + dinv_col;
int inv_index_x = K2 * dinv_row + dinv_col;
const Dtype value_x = dTop[t_index_x]* inv_constant[inv_index_x];
dBottom[index] = value_x;
int t_index_y = (K2 * 2) * i + dinv_col + K2;
int inv_index_y = K2 * dinv_row + dinv_col;
const Dtype value_y = dTop[t_index_y] * inv_constant[inv_index_y];
dBottom[index + K1 * K2] = value_y;
}
}
template <typename Dtype>
void CToParaLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
string prefix = "CToParaLayer::Backward_gpu::\t";
//std::cout << prefix << "Starting!" << std::endl;
//test_defined_count++;
//Backward_cpu(top, propagate_down, bottom);
const Dtype* dT = top[0]->gpu_diff();
const Dtype* inv_c = inv_delta_c.gpu_data();
Dtype* dC = bottom[0]->mutable_gpu_diff();
Dtype* dC_tmp_diff = dC_tmp.mutable_gpu_diff();
caffe_gpu_set(dC_tmp.count(), (Dtype)0., dC_tmp_diff);
caffe_gpu_set(bottom[0]->count(), (Dtype)0, dC);
int coff_num = top[0]->shape(1);
int c_num = bottom[0]->shape(1);
const int nthreads = N * (K) * (K+3);
CToParaBackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS >> >(nthreads, N, K, (K + 3), inv_c, dT, dC_tmp_diff);
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
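	// Multiplying dC_tmp (2K x (K+3)) by a column vector of ones sums each row
	// over the K+3 columns, producing the 2K x 1 control-point gradient dC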
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 2*K, 1, K + 3,
(Dtype)1., dC_tmp_diff, all_ones_2_data, (Dtype)0., dC);
//std::cout << prefix << "ending!" << std::endl;
//if (test_defined_count == 1000)
//{
// std::cout << "----------------dC gpu-------------------" << std::endl;
// const Dtype* dC_test = bottom[0]->cpu_diff();
// for (int i = 0; i < bottom[0]->count(); ++i) {
// std::cout << dC_test[i] << " ";
// }
// std::cout << std::endl;
// Dtype* break_ptr = 0;
// *break_ptr = 1;
//}
}
INSTANTIATE_LAYER_GPU_FUNCS(CToParaLayer);
}
|
e35c4e9efd113c06b47058742e6b31aaf84fc7fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define X_BLOCK 1024
#define PITCH 1024
extern "C"
__global__ void MRDWT(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = ((((((((((((((((((((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*9.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*8.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*7.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+7)%1024)]*2.0))*9.0)+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((thre
adIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+7)%1024)]*2.0))*2.0))*9.0)+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1
024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*8.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+1)%1024)]*8
.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_B
LOCK))+8)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*7.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+1
2)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*6.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(bloc
kIdx.x*X_BLOCK))+16)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.
x*X_BLOCK))+16)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*5.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(block
Idx.x*X_BLOCK))+20)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*4.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((
threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threa
dIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*3.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((t
hreadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*2.0));
}
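// The kernels below share the same generated layout: X_BLOCK and PITCH are
// re-#defined to 1024 before each kernel, every kernel is declared extern "C",
// and each thread writes exactly one element of the output at
// result0[blockIdx.y * PITCH + blockIdx.x * X_BLOCK + threadIdx.x],
// computed as a single fully unrolled weighted sum with circular (mod 1024) indexing.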
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
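// MRDWT0: 7-tap weighted sum. For linear index i = threadIdx.x + blockIdx.x * X_BLOCK,
// the output is sum_{k=0..6} w[k] * input4[(i + k) % 1024] with weights w = {7,6,5,4,3,2,1}.
// The `constant` argument is not referenced; the weights are hard-coded in the expression.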
extern "C"
__global__ void MRDWT0(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*7.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*2.0))+input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]);
}
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
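// MRDWT1: two unrolled filter passes fused into one expression. Seven 8-tap inner
// sums (weights 9..2 at unit offsets, indices wrapped mod 1024) are taken at even
// offsets 0,2,...,12 from the thread's linear index and combined with outer
// weights 7,6,...,1. The `constant` argument is not referenced.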
extern "C"
__global__ void MRDWT1(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((((((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*9.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*8.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*7.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+7)%1024)]*2.0))*7.0)+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+7)%1024)]*2.0))*2.0))+((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threadIdx.x+(b
lockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0)));
}
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
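// MRDWT2: three fused unrolled passes. Innermost: 8-tap sums (weights 9..2, unit
// offsets, mod 1024). Middle: eight such sums at even offsets 0..14, weighted 9..2.
// Outer: seven middle-level results at offsets 0,4,...,24, weighted 7,6,...,1.
// The `constant` argument is not referenced.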
extern "C"
__global__ void MRDWT2(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((((((((((((((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*9.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*8.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*7.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+7)%1024)]*2.0))*9.0)+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threa
dIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+7)%1024)]*2.0))*2.0))*7.0)+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%10
24)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*6.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+1)%1024)]*8.
0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BL
OCK))+8)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*5.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12
)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*4.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(block
Idx.x*X_BLOCK))+16)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x
*X_BLOCK))+16)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*3.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockI
dx.x*X_BLOCK))+20)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*2.0))+((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((th
readIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadI
dx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+7)%1024)]*2.0))*2.0)));
}
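// The statement above appears to be a single machine-generated, fully unrolled
// expression: input4 samples are combined with fixed filter weights and circular
// (% 1024) indexing over a 1024-sample row.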
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
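// MIRDWT below appears to be a machine-generated inverse (redundant) discrete
// wavelet transform stage, emitted as one fully unrolled expression per output
// sample. X_BLOCK is the number of threads per block in x and PITCH is the row
// stride of result0; both are 1024 here. Offsets written as large unsigned
// constants encode negative displacements: 4294967288 is 2^32 - 8, and because
// 2^32 is a multiple of 1024, (i + 4294967288) % 1024 equals (i - 8) mod 1024
// with a non-negative result, i.e. a circular shift by -8 within a 1024-sample
// row. A plausible launch, assuming one block in x and one row per block in y,
// would be hipLaunchKernelGGL(MIRDWT, dim3(1, rows), dim3(X_BLOCK), 0, 0,
// <device pointers>), but the actual grid shape is set by the host code that
// drives these generated kernels.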
extern "C"
__global__ void MIRDWT(float *constant,float *input5,float *input6,float *input7,float *input8,float *result0){
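// One output sample per thread: result0[blockIdx.y * PITCH + blockIdx.x * X_BLOCK + threadIdx.x].
// The right-hand side combines the input arrays with fixed filter weights and
// circular (mod 1024) indexing, fully unrolled by the code generator.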
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((((((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blo
ckIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockI
dx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.
x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X
_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)]))*10.0)+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+42949672
82)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)
%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%10
24)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)
+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)]))*9.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((thr
eadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((thread
Idx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx
.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+
(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)]))*8.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967
280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284
)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1
024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024
)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)]))*7.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((t
hreadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((thre
adIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadI
dx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.
x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BL
OCK))+4294967292)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)]))*6.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+42949672
82)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)
%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%10
24)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)
+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)]))*5.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[((
(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((
((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((t
hreadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((thre
adIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)]))*4.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%10
24)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)
+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+42
94967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+42949
67294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)]))*3.0))+((((((((input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)]*8.0)+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)]*7.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)]*6.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)]*5.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)]*4.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)]*3.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)]*2.0))+input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)]));
}
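// Editor's note (assumption): in the generated indexing above, 32-bit unsigned constants
// such as 4294967295, 4294967286 and 4294967264 wrap modulo 2^32 and therefore behave as
// the negative offsets -1, -10 and -32; because 2^32 is a multiple of 1024, the trailing
// %1024 still yields the intended circularly wrapped position within a 1024-element row.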
#undef X_BLOCK
#undef PITCH
| e35c4e9efd113c06b47058742e6b31aaf84fc7fe.cu | #define X_BLOCK 1024
#define PITCH 1024
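// Editor's note (assumption): MRDWT below is machine-generated and fully unrolled. Each
// thread appears to accumulate a three-level cascade of descending-weight sums over
// input4 with circular %1024 indexing: 8 taps weighted 9..2 at unit stride, grouped at
// stride 2 with weights 9..2, and those groups combined at stride 4 with weights 9..2.
// Each block writes one 1024-element span of result0; the constant parameter is unused here.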
extern "C"
__global__ void MRDWT(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = ((((((((((((((((((((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*9.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*8.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*7.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+7)%1024)]*2.0))*9.0)+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((thre
adIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+7)%1024)]*2.0))*2.0))*9.0)+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1
024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*8.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+1)%1024)]*8
.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_B
LOCK))+8)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*7.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+1
2)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*6.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(bloc
kIdx.x*X_BLOCK))+16)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.
x*X_BLOCK))+16)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*5.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(block
Idx.x*X_BLOCK))+20)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*4.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((
threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threa
dIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*3.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((t
hreadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+28)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*2.0));
}
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
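// Editor's note (assumption): MRDWT0 appears to compute, per thread, a 7-tap weighted sum
// over input4 with descending weights 7..1 at consecutive offsets, wrapped circularly with
// %1024; see the loop-based sketch added after the kernel.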
extern "C"
__global__ void MRDWT0(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*7.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*2.0))+input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]);
}
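// Editor's sketch (not part of the original file): a loop-based restatement of MRDWT0 for
// readability. The kernel name MRDWT0_reference is an assumption; the accumulation follows
// the same left-to-right order and double-precision intermediates as the generated
// expression above, so it is intended to be numerically equivalent for the same launch
// configuration (blockDim.x == 1024).
extern "C"
__global__ void MRDWT0_reference(float *constant, float *input4, float *result0){
    unsigned int i = threadIdx.x + blockIdx.x * X_BLOCK;    // same base index as MRDWT0
    double acc = input4[i] * 7.0;                            // weight 7 at offset 0 (unwrapped, as in MRDWT0)
    for (int k = 1; k <= 6; ++k)                             // weights 6..1 at offsets 1..6, circular within 1024
        acc += input4[(i + k) % 1024] * (double)(7 - k);
    result0[(blockIdx.y * PITCH + blockIdx.x * X_BLOCK) + threadIdx.x] = acc;
}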
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
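// Editor's note (assumption): MRDWT1 appears to nest two levels: inner 8-tap sums with
// weights 9..2 at unit stride, combined by an outer 7-tap sum with weights 7..1 at
// stride 2, all indices wrapped with %1024 as in the other generated kernels.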
extern "C"
__global__ void MRDWT1(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((((((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*9.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*8.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*7.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+7)%1024)]*2.0))*7.0)+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+7)%1024)]*2.0))*2.0))+((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threadIdx.x+(b
lockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0)));
}
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
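// Editor's note (assumption): MRDWT2 appears to extend the same generated pattern by one
// more nesting level: inner 8-tap sums with weights 9..2 at unit stride, grouped at
// stride 2 with weights 9..2, and those groups combined again at stride 4 (the first
// outer group carries weight 7), all with circular %1024 indexing.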
extern "C"
__global__ void MRDWT2(float *constant,float *input4,float *result0){
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((((((((((((((((((input4[(threadIdx.x+(blockIdx.x*X_BLOCK))]*9.0)+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+1)%1024)]*8.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*7.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+3)%1024)]*6.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*5.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+5)%1024)]*4.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*3.0))+(input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+7)%1024)]*2.0))*9.0)+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threa
dIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+14)%1024)+7)%1024)]*2.0))*2.0))*7.0)+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%10
24)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*6.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+1)%1024)]*8.
0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BL
OCK))+8)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+8)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*5.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12
)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+12)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*4.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(block
Idx.x*X_BLOCK))+16)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x
*X_BLOCK))+16)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+16)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*3.0))+(((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockI
dx.x*X_BLOCK))+20)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+20)%1024)+14)%1024)+7)%1024)]*2.0))*2.0))*2.0))+((((((((((((((((input4[(((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)]*9.0)+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+1)%1024)]*8.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*7.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+3)%1024)]*6.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*5.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+5)%1024)]*4.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*3.0))+(input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+7)%1024)]*2.0))*9.0)+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+1)%1024)]*8.0))+(input4[(((((((th
readIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+2)%1024)+7)%1024)]*2.0))*8.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+4)%1024)+7)%1024)]*2.0))*7.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+6)%1024)+7)%1024)]*2.0))*6.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+8)%1024)+7)%1024)]*2.0))*5.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+10)%1024)+7)%1024)]*2.0))*4.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+3)%1024)]*6.0))+(input4[(((((((threadI
dx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+12)%1024)+7)%1024)]*2.0))*3.0))+(((((((((input4[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)]*9.0)+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+1)%1024)]*8.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+2)%1024)]*7.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+3)%1024)]*6.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+4)%1024)]*5.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+5)%1024)]*4.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+6)%1024)]*3.0))+(input4[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+24)%1024)+14)%1024)+7)%1024)]*2.0))*2.0)));
}
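// X_BLOCK and PITCH carry the launch geometry of the generated kernels; they
// appear to be #undef'd and re-#defined before each kernel so that every kernel
// bakes its own block width and row pitch into its index arithmetic
// (1024 x 1024 for the MIRDWT kernel below).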
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 1024
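// MIRDWT appears to be a fully unrolled, machine-generated reconstruction pass of
// an inverse redundant (undecimated) discrete wavelet transform: each thread of a
// 1024-wide block writes one sample of a 1024-element row selected by blockIdx.y.
// Literals such as 4294967288 are the unsigned two's-complement encodings of small
// negative offsets (4294967288 == -8, 4294967280 == -16, ...); because 2^32 is a
// multiple of 1024, adding them and then taking "% 1024" yields circular
// (wrap-around) indexing within the row without any boundary branches.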
extern "C"
__global__ void MIRDWT(float *constant,float *input5,float *input6,float *input7,float *input8,float *result0){
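    // One fused store per thread: the generated expression below accumulates every
    // weighted filter tap drawn from the input rows (input5, input6, input7, ...)
    // around the current column using the mod-1024 circular indexing described
    // above, then writes the reconstructed sample to
    // result0[((blockIdx.y*PITCH) + (blockIdx.x*X_BLOCK)) + threadIdx.x].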
result0[(((blockIdx.y*PITCH)+(blockIdx.x*X_BLOCK))+threadIdx.x)] = (((((((((((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blo
ckIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockI
dx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.
x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X
_BLOCK))+4294967288)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)+4294967294)%1024)]))*10.0)+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+42949672
82)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)
%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%10
24)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)
+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)+4294967294)%1024)]))*9.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((thr
eadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((thread
Idx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx
.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+
(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)+4294967294)%1024)]))*8.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967
280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284
)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1
024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024
)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)+4294967294)%1024)]))*7.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((t
hreadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((thre
adIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadI
dx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.
x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BL
OCK))+4294967292)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)+4294967294)%1024)]))*6.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+42949672
82)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)
%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%10
24)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)
+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)+4294967294)%1024)]))*5.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[((
(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((
((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((t
hreadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((thre
adIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)+4294967294)%1024)]))*4.0))+(((((((((((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)+4294967292)%1024)]))*10.0)+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%10
24)+4294967282)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)+4294967292)%1024)]))*9.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)+4294967292)%1024)]))*8.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)
+4294967286)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)+4294967292)%1024)]))*7.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)+4294967292)%1024)]))*6.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+42
94967290)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)+4294967292)%1024)]))*5.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)+4294967292)%1024)]))*4.0))+((((((((((input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967264)%1024)]*10.0)+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+42949
67294)%1024)+4294967268)%1024)]*9.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967272)%1024)]*8.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967276)%1024)]*7.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967280)%1024)]*6.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967284)%1024)]*5.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967288)%1024)]*4.0))+(input5[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967292)%1024)]*3.0))+((((((((input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967264)%1024)]*8.0)+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967268)%1024)]*7.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967272)%1024)]*6.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967276)%1024)]*5.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967280)%1024)]*4.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967284)%1024)]*3.0))+(input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967288)%1024)]*2.0))+input6[(((((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)+4294967292)%1024)]))*3.0))+((((((((input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967280)%1024)]*8.0)+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967282)%1024)]*7.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967284)%1024)]*6.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967286)%1024)]*5.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967288)%1024)]*4.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967290)%1024)]*3.0))+(input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967292)%1024)]*2.0))+input7[(((((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)+4294967294)%1024)]))*3.0))+((((((((input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967288)%1024)]*8.0)+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967289)%1024)]*7.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967290)%1024)]*6.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967291)%1024)]*5.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967292)%1024)]*4.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967293)%1024)]*3.0))+(input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967294)%1024)]*2.0))+input8[(((threadIdx.x+(blockIdx.x*X_BLOCK))+4294967295)%1024)]));
}
#undef X_BLOCK
#undef PITCH
|
35c827b938fea3a54f329785f49973a029b5c9d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
typedef ColorConvParameter P;
namespace convert_color_gpu {
template <typename T, int, int> struct Converter {
};
template <typename T> struct Converter<T, P::RGB, P::RGB> {
__device__ static void convert(const T * in, T * out) {
out[0] = in[0];
out[1] = in[1];
out[2] = in[2];
}
};
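// invS/S below are the sRGB transfer functions: invS maps a gamma-encoded
// value in [0,1] to linear light (applied before the RGB->XYZ matrix), and S
// is its inverse (applied after XYZ->RGB). As a quick sanity check,
// invS(1.0) is 1.0 and invS(0.5) is roughly 0.214, so mid-grey is much
// darker once linearized; S(invS(v)) returns v up to floating-point error.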
template <typename T>
__device__ T invS(T v) {
return v <= T(0.04045) ? v/T(12.92) : pow((v + T(0.055))/T(1.055), T(2.4));
}
template <typename T>
__device__ T S(T v) {
return v <= T(0.0031308) ? v*T(12.92) : T(1.055)*pow(v, T(1./2.4)) - T(0.055);
}
template <typename T> struct Converter<T, P::RGB, P::XYZ> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
for (int i = 0; i < 3; i++)
tmp[i] = invS(in[i]/T(255.));
out[0] = T(0.4124564)*tmp[0] + T(0.3575761)*tmp[1] + T(0.1804375)*tmp[2];
out[1] = T(0.2126729)*tmp[0] + T(0.7151522)*tmp[1] + T(0.0721750)*tmp[2];
out[2] = T(0.0193339)*tmp[0] + T(0.1191920)*tmp[1] + T(0.9503041)*tmp[2];
}
};
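// The coefficients above form the standard sRGB (D65) linear-RGB -> XYZ
// matrix; the XYZ -> RGB specialization below applies its inverse, so a
// round trip through both converters reproduces the input up to
// floating-point error.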
template <typename T> struct Converter<T, P::XYZ, P::RGB> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
tmp[0] = T(3.2404542)*in[0] - T(1.5371385)*in[1] - T(0.4985314)*in[2];
tmp[1] = -T(0.9692660)*in[0] + T(1.8760108)*in[1] + T(0.0415560)*in[2];
tmp[2] = T(0.0556434)*in[0] - T(0.2040259)*in[1] + T(1.0572252)*in[2];
for (int i = 0; i < 3; i++)
out[i] = S(tmp[i])*T(255.);
}
};
template <typename T> struct Converter<T, P::RGB, P::Gray> {
__device__ static void convert(const T * in, T * out) {
out[0] = T(0.2126729)*in[0] + T(0.7151522)*in[1] + T(0.0721750)*in[2];
}
};
template <typename T> struct Converter<T, P::Gray, P::RGB> {
__device__ static void convert(const T * in, T * out) {
out[0] = out[1] = out[2] = in[0];
}
};
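// F/invF below implement the CIE L*a*b* companding function f(t) and its
// inverse: f(t) = t^(1/3) above a small cutoff and a linear ramp
// (903.3*t + 16)/116 below it, which keeps Lab well behaved near black.
// The 0.206896 threshold in invF is f evaluated at that cutoff (roughly
// 6/29), so invF(F(t)) == t on both branches.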
template <typename T>
__device__ T invF(T v) {
return v > T(0.206896) ? pow(v, T(3.)) : (T(116) * v - T(16.)) / T(903.3);
}
template <typename T>
__device__ T F(T v) {
return v > T(0.008867) ? pow(v, T(1./3)) : (T(903.3) * v + T(16.)) / T(116.);
}
template <typename T> struct Converter<T, P::Lab, P::XYZ> {
__device__ static void convert(const T * in, T * out) {
const T R[3] = {0.95074, 1.0, 1.08883}; // Reference white D65
T tmp[3] = {0};
tmp[1] = (in[0]+16) / T(116.);
tmp[0] = tmp[1] + in[1] / T(500.);
tmp[2] = tmp[1] - in[2] / T(200.);
for (int i = 0; i < 3; i++)
out[i] = invF(tmp[i]) * R[i];
}
};
template <typename T> struct Converter<T, P::XYZ, P::Lab> {
__device__ static void convert(const T * in, T * out) {
const T R[3] = {0.95074, 1.0, 1.08883}; // Reference white D65
T tmp[3] = {0};
for (int i = 0; i < 3; i++)
tmp[i] = F(in[i] / R[i]);
out[0] = T(116.)*tmp[1] - T(16.);
out[1] = T(500.)*(tmp[0] - tmp[1]);
out[2] = T(200.)*(tmp[1] - tmp[2]);
}
};
template <typename T> struct Converter<T, P::RGB, P::Lab> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
Converter<T, P::RGB, P::XYZ>::convert(in, tmp);
Converter<T, P::XYZ, P::Lab>::convert(tmp, out);
}
};
template <typename T> struct Converter<T, P::Lab, P::RGB> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
Converter<T, P::Lab, P::XYZ>::convert(in, tmp);
Converter<T, P::XYZ, P::RGB>::convert(tmp, out);
}
};
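// There is no single-step RGB <-> Lab formula here: the two converters above
// simply chain the RGB <-> XYZ and XYZ <-> Lab specializations through a
// temporary. A converter for any additional pair of spaces can be composed
// the same way from the single-hop specializations.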
template <typename T, int from, int to>
__global__ void convert_kernel(int N, const T * i1, const T * i2,
const T * i3, T * o1, T * o2, T * o3) {
typedef Converter<T, from, to> C;
T in[3], out[3];
CUDA_KERNEL_LOOP(i, N) {
in[0] = i1[i];
if (i2) in[1] = i2[i];
if (i3) in[2] = i3[i];
C::convert(in, out);
o1[i] = out[0];
if (o2) o2[i] = out[1];
if (o3) o3[i] = out[2];
}
}
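// convert_kernel assumes planar channel storage: each channel is a contiguous
// block of N values, so element i of every plane belongs to the same pixel.
// For Gray inputs/outputs the unused channel pointers are passed as NULL by
// convert() below and skipped by the if-guards above.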
template <typename T, int from, int to>
void convert(int N, const T * input, T * output,
bool swap_in = 0, bool swap_out = 0) {
const T *i1 = input, *i2 = input + N, *i3 = input + 2*N;
if (swap_in) std::swap(i1, i3);
T *o1 = output, *o2 = output + N, *o3 = output + 2*N;
if (swap_out) std::swap(o1, o3);
if (from == P::Gray) i2 = i3 = NULL;
if (to == P::Gray) o2 = o3 = NULL;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((convert_kernel<T, from, to>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, i1, i2, i3, o1, o2, o3);
}
}; // namespace convert_color_gpu
template <typename T>
void ColorConvLayer<T>::Forward_gpu(const vector<Blob<T>*>& bottom,
const vector<Blob<T>*>& top) {
using convert_color_gpu::convert;
const int N = bottom[0]->shape()[0], Cb = bottom[0]->shape()[1];
const int Ct = top[0]->shape()[1], count = bottom[0]->count() / (N*Cb);
ColorConvParameter::ColorSpace in = input_space_, out = output_space_;
bool swap_in = (in == P::BGR), swap_out = (out == P::BGR);
if (swap_in) in = P::RGB;
if (swap_out) out = P::RGB;
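// BGR never reaches the converters directly: it is remapped to RGB above and
// handled purely by swapping the first and third channel pointers inside
// convert(), so only RGB/XYZ/Lab/Gray pairs need Converter specializations.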
for (int n = 0; n < N; n++) {
const T * input = bottom[0]->gpu_data() + n * count * Cb;
T * output = top[0]->mutable_gpu_data() + n * count * Ct;
#define CONVERT(A, B) if (in == A && out == B) {\
convert<T, A, B>(count, input, output, swap_in, swap_out);\
continue;\
}
CONVERT(P::Gray, P::RGB);
CONVERT(P::Lab, P::RGB);
CONVERT(P::Lab, P::XYZ);
CONVERT(P::RGB, P::Gray);
CONVERT(P::RGB, P::Lab);
CONVERT(P::RGB, P::RGB);
CONVERT(P::RGB, P::XYZ);
CONVERT(P::XYZ, P::Lab);
CONVERT(P::XYZ, P::RGB);
}
}
INSTANTIATE_LAYER_GPU_FORWARD(ColorConvLayer);
} // namespace caffe
| 35c827b938fea3a54f329785f49973a029b5c9d4.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
typedef ColorConvParameter P;
namespace convert_color_gpu {
template <typename T, int, int> struct Converter {
};
template <typename T> struct Converter<T, P::RGB, P::RGB> {
__device__ static void convert(const T * in, T * out) {
out[0] = in[0];
out[1] = in[1];
out[2] = in[2];
}
};
template <typename T>
__device__ T invS(T v) {
return v <= T(0.04045) ? v/T(12.92) : pow((v + T(0.055))/T(1.055), T(2.4));
}
template <typename T>
__device__ T S(T v) {
return v <= T(0.0031308) ? v*T(12.92) : T(1.055)*pow(v, T(1./2.4)) - T(0.055);
}
template <typename T> struct Converter<T, P::RGB, P::XYZ> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
for (int i = 0; i < 3; i++)
tmp[i] = invS(in[i]/T(255.));
out[0] = T(0.4124564)*tmp[0] + T(0.3575761)*tmp[1] + T(0.1804375)*tmp[2];
out[1] = T(0.2126729)*tmp[0] + T(0.7151522)*tmp[1] + T(0.0721750)*tmp[2];
out[2] = T(0.0193339)*tmp[0] + T(0.1191920)*tmp[1] + T(0.9503041)*tmp[2];
}
};
template <typename T> struct Converter<T, P::XYZ, P::RGB> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
tmp[0] = T(3.2404542)*in[0] - T(1.5371385)*in[1] - T(0.4985314)*in[2];
tmp[1] = -T(0.9692660)*in[0] + T(1.8760108)*in[1] + T(0.0415560)*in[2];
tmp[2] = T(0.0556434)*in[0] - T(0.2040259)*in[1] + T(1.0572252)*in[2];
for (int i = 0; i < 3; i++)
out[i] = S(tmp[i])*T(255.);
}
};
template <typename T> struct Converter<T, P::RGB, P::Gray> {
__device__ static void convert(const T * in, T * out) {
out[0] = T(0.2126729)*in[0] + T(0.7151522)*in[1] + T(0.0721750)*in[2];
}
};
template <typename T> struct Converter<T, P::Gray, P::RGB> {
__device__ static void convert(const T * in, T * out) {
out[0] = out[1] = out[2] = in[0];
}
};
template <typename T>
__device__ T invF(T v) {
return v > T(0.206896) ? pow(v, T(3.)) : (T(116) * v - T(16.)) / T(903.3);
}
template <typename T>
__device__ T F(T v) {
return v > T(0.008867) ? pow(v, T(1./3)) : (T(903.3) * v + T(16.)) / T(116.);
}
template <typename T> struct Converter<T, P::Lab, P::XYZ> {
__device__ static void convert(const T * in, T * out) {
const T R[3] = {0.95074, 1.0, 1.08883}; // Reference white D65
T tmp[3] = {0};
tmp[1] = (in[0]+16) / T(116.);
tmp[0] = tmp[1] + in[1] / T(500.);
tmp[2] = tmp[1] - in[2] / T(200.);
for (int i = 0; i < 3; i++)
out[i] = invF(tmp[i]) * R[i];
}
};
template <typename T> struct Converter<T, P::XYZ, P::Lab> {
__device__ static void convert(const T * in, T * out) {
const T R[3] = {0.95074, 1.0, 1.08883}; // Reference white D65
T tmp[3] = {0};
for (int i = 0; i < 3; i++)
tmp[i] = F(in[i] / R[i]);
out[0] = T(116.)*tmp[1] - T(16.);
out[1] = T(500.)*(tmp[0] - tmp[1]);
out[2] = T(200.)*(tmp[1] - tmp[2]);
}
};
template <typename T> struct Converter<T, P::RGB, P::Lab> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
Converter<T, P::RGB, P::XYZ>::convert(in, tmp);
Converter<T, P::XYZ, P::Lab>::convert(tmp, out);
}
};
template <typename T> struct Converter<T, P::Lab, P::RGB> {
__device__ static void convert(const T * in, T * out) {
T tmp[3] = {0};
Converter<T, P::Lab, P::XYZ>::convert(in, tmp);
Converter<T, P::XYZ, P::RGB>::convert(tmp, out);
}
};
template <typename T, int from, int to>
__global__ void convert_kernel(int N, const T * i1, const T * i2,
const T * i3, T * o1, T * o2, T * o3) {
typedef Converter<T, from, to> C;
T in[3], out[3];
CUDA_KERNEL_LOOP(i, N) {
in[0] = i1[i];
if (i2) in[1] = i2[i];
if (i3) in[2] = i3[i];
C::convert(in, out);
o1[i] = out[0];
if (o2) o2[i] = out[1];
if (o3) o3[i] = out[2];
}
}
template <typename T, int from, int to>
void convert(int N, const T * input, T * output,
bool swap_in = 0, bool swap_out = 0) {
const T *i1 = input, *i2 = input + N, *i3 = input + 2*N;
if (swap_in) std::swap(i1, i3);
T *o1 = output, *o2 = output + N, *o3 = output + 2*N;
if (swap_out) std::swap(o1, o3);
if (from == P::Gray) i2 = i3 = NULL;
if (to == P::Gray) o2 = o3 = NULL;
convert_kernel<T, from, to> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, i1, i2, i3, o1, o2, o3);
}
}; // namespace convert_color_gpu
template <typename T>
void ColorConvLayer<T>::Forward_gpu(const vector<Blob<T>*>& bottom,
const vector<Blob<T>*>& top) {
using convert_color_gpu::convert;
const int N = bottom[0]->shape()[0], Cb = bottom[0]->shape()[1];
const int Ct = top[0]->shape()[1], count = bottom[0]->count() / (N*Cb);
ColorConvParameter::ColorSpace in = input_space_, out = output_space_;
bool swap_in = (in == P::BGR), swap_out = (out == P::BGR);
if (swap_in) in = P::RGB;
if (swap_out) out = P::RGB;
for (int n = 0; n < N; n++) {
const T * input = bottom[0]->gpu_data() + n * count * Cb;
T * output = top[0]->mutable_gpu_data() + n * count * Ct;
#define CONVERT(A, B) if (in == A && out == B) {\
convert<T, A, B>(count, input, output, swap_in, swap_out);\
continue;\
}
CONVERT(P::Gray, P::RGB);
CONVERT(P::Lab, P::RGB);
CONVERT(P::Lab, P::XYZ);
CONVERT(P::RGB, P::Gray);
CONVERT(P::RGB, P::Lab);
CONVERT(P::RGB, P::RGB);
CONVERT(P::RGB, P::XYZ);
CONVERT(P::XYZ, P::Lab);
CONVERT(P::XYZ, P::RGB);
}
}
INSTANTIATE_LAYER_GPU_FORWARD(ColorConvLayer);
} // namespace caffe
|
a3af2cd7caa3a0f5245672dc30dd7101a13dd8c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ unsigned int cuda_delta = 0;
__device__ unsigned int maskForMode(unsigned int x, unsigned int y, unsigned int z, unsigned int w) {
unsigned int max = x > y ? x : y;
max = z > max ? z : max;
max = w > max ? w : max;
unsigned int mask = 0;
if (max == x){
mask |= 1;
}
if (max == y){
mask |= 2; // 010
}
if (max == z){
mask |= 4; // 0100
}
if (max == w){
mask |= 8; // 1000
}
return mask;
}
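// maskForMode sets a bit for every argument that attains the maximum count,
// so ties produce a mask with more than one bit set; update_centroids below
// ORs that mask into the matching 4-bit group of the packed centroid value.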
__global__ void update_centroids(const sequence_t *data, sequence_t *centroids, unsigned int * tmp_centroidCount, unsigned int numClusters){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numClusters){
sequence_t seq = make_ulong3(0,0,0);
unsigned int *tmp_centroid = &tmp_centroidCount[i* BIT_SIZE_OF(sequence_t)];
for (int j=0;j<SEQ_DIM_BITS_SIZE;j+=4)
{
// tmp_centroid[0] corresponds to the least significant bit of sequence_t,
// i.e. tmp_centroid[0] counts bit 0 of the z component (z << 0)
unsigned int *bitCountX = &tmp_centroid[j + (SEQ_DIM_BITS_SIZE * 2)];
unsigned int *bitCountY = &tmp_centroid[j + SEQ_DIM_BITS_SIZE];
unsigned int *bitCountZ = &tmp_centroid[j];
unsigned long int mask = maskForMode(bitCountX[0],bitCountX[1],bitCountX[2],bitCountX[3]);
seq.x |= (mask << j);
mask = maskForMode(bitCountY[0],bitCountY[1],bitCountY[2],bitCountY[3]);
seq.y |= (mask << j);
mask = maskForMode(bitCountZ[0],bitCountZ[1],bitCountZ[2],bitCountZ[3]);
seq.z |= (mask << j);
}
centroids[i] = seq;
}
} | a3af2cd7caa3a0f5245672dc30dd7101a13dd8c1.cu | #include "includes.h"
__device__ unsigned int cuda_delta = 0;
__device__ unsigned int maskForMode(unsigned int x, unsigned int y, unsigned int z, unsigned int w) {
unsigned int max = x > y ? x : y;
max = z > max ? z : max;
max = w > max ? w : max;
unsigned int mask = 0;
if (max == x){
mask |= 1;
}
if (max == y){
mask |= 2; // 010
}
if (max == z){
mask |= 4; // 0100
}
if (max == w){
mask |= 8; // 1000
}
return mask;
}
__global__ void update_centroids(const sequence_t *data, sequence_t *centroids, unsigned int * tmp_centroidCount, unsigned int numClusters){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numClusters){
sequence_t seq = make_ulong3(0,0,0);
unsigned int *tmp_centroid = &tmp_centroidCount[i* BIT_SIZE_OF(sequence_t)];
for (int j=0;j<SEQ_DIM_BITS_SIZE;j+=4)
{
// tmp_centroid[0] corresponds to the least significant bit of sequence_t,
// i.e. tmp_centroid[0] counts bit 0 of the z component (z << 0)
unsigned int *bitCountX = &tmp_centroid[j + (SEQ_DIM_BITS_SIZE * 2)];
unsigned int *bitCountY = &tmp_centroid[j + SEQ_DIM_BITS_SIZE];
unsigned int *bitCountZ = &tmp_centroid[j];
unsigned long int mask = maskForMode(bitCountX[0],bitCountX[1],bitCountX[2],bitCountX[3]);
seq.x |= (mask << j);
mask = maskForMode(bitCountY[0],bitCountY[1],bitCountY[2],bitCountY[3]);
seq.y |= (mask << j);
mask = maskForMode(bitCountZ[0],bitCountZ[1],bitCountZ[2],bitCountZ[3]);
seq.z |= (mask << j);
}
centroids[i] = seq;
}
} |
29b3fe33d2acb1a917164ee6a553bef0890b3eab.hip | // !!! This is a file automatically generated by hipify!!!
/* -----------------------------------------------------------------------------
*
* Module : MVM
* Copyright : (c) [2012] Kevin Ying
* License : BSD
*
* Matrix vector multiplication using cublas
*
* ---------------------------------------------------------------------------*/
#include "algorithms.h"
#include <stdint.h>
#include <thrust/device_vector.h>
#include "rocblas.h"
/* -----------------------------------------------------------------------------
* Instances
* ---------------------------------------------------------------------------*/
void
mvm_ff(hipblasHandle_t handle, float *d_y, const float *d_A, const float *d_x, const uint32_t m, const uint32_t n)
{
#ifdef _BENCH
hipDeviceSynchronize();
time_t t_beg, t_end;
time(&t_beg);
std::cerr << "mvm_ff" << std::endl;
#endif
/*hipblasHandle_t handle;*/
/*hipblasCreate(&handle);*/
float alpha = 1.0;
float beta = 0.0;
// Because cublas uses col major storage (as opposed to row major) swap row and col values and use HIPBLAS_OP_T
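// Concretely: d_A holds a row-major m x n matrix, which cuBLAS/hipBLAS reads
// as a column-major n x m matrix equal to A^T. Passing HIPBLAS_OP_T with
// dimensions (n, m) and lda = n therefore computes y = (A^T)^T * x = A * x,
// a vector of length m, which is exactly the product we want.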
/*thrust::device_ptr<const float> d_A_th(d_A);*/
/*for (int i = 0; i < m; i++) {*/
/*for (int j = 0; j < n; j++) {*/
/*std::cerr << d_A_th[i*n + j] << " ";*/
/*}*/
/*std::cerr << std::endl;*/
/*}*/
//std::cerr << "m " << m << "n " << n << "alpha " << alpha << "beta " << beta << std::endl;
hipblasStatus_t status = hipblasSgemv(handle, HIPBLAS_OP_T, n, m, &alpha, d_A, n, d_x, 1, &beta, d_y, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS FAILURE" << std::endl;
}
/*hipblasDestroy(handle);*/
#ifdef _BENCH
hipDeviceSynchronize();
time(&t_end);
std::cerr<< "Time elapsed for mvm_ff: " << difftime(t_end,t_beg) << " seconds" << std::endl;
#endif
}
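// Minimal usage sketch (illustrative only; allocation and error checking
// omitted). d_A is a row-major m x n matrix on the device, d_x has length n
// and d_y length m:
//
//   hipblasHandle_t handle;
//   hipblasCreate(&handle);
//   mvm_ff(handle, d_y, d_A, d_x, m, n);
//   hipblasDestroy(handle);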
| 29b3fe33d2acb1a917164ee6a553bef0890b3eab.cu | /* -----------------------------------------------------------------------------
*
* Module : MVM
* Copyright : (c) [2012] Kevin Ying
* License : BSD
*
* Matrix vector multiplication using cublas
*
* ---------------------------------------------------------------------------*/
#include "algorithms.h"
#include <stdint.h>
#include <thrust/device_vector.h>
#include "cublas_v2.h"
/* -----------------------------------------------------------------------------
* Instances
* ---------------------------------------------------------------------------*/
void
mvm_ff(cublasHandle_t handle, float *d_y, const float *d_A, const float *d_x, const uint32_t m, const uint32_t n)
{
#ifdef _BENCH
cudaThreadSynchronize();
time_t t_beg, t_end;
time(&t_beg);
std::cerr << "mvm_ff" << std::endl;
#endif
/*cublasHandle_t handle;*/
/*cublasCreate(&handle);*/
float alpha = 1.0;
float beta = 0.0;
// Because cublas uses col major storage (as opposed to row major) swap row and col values and use CUBLAS_OP_T
/*thrust::device_ptr<const float> d_A_th(d_A);*/
/*for (int i = 0; i < m; i++) {*/
/*for (int j = 0; j < n; j++) {*/
/*std::cerr << d_A_th[i*n + j] << " ";*/
/*}*/
/*std::cerr << std::endl;*/
/*}*/
//std::cerr << "m " << m << "n " << n << "alpha " << alpha << "beta " << beta << std::endl;
cublasStatus_t status = cublasSgemv(handle, CUBLAS_OP_T, n, m, &alpha, d_A, n, d_x, 1, &beta, d_y, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS FAILURE" << std::endl;
}
/*cublasDestroy(handle);*/
#ifdef _BENCH
cudaThreadSynchronize();
time(&t_end);
std::cerr<< "Time elapsed for mvm_ff: " << difftime(t_end,t_beg) << " seconds" << std::endl;
#endif
}
|
9d60e6964300739bfd5c4507b4664a333f39fcc0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdio.h>
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
float sum=0.0f;
int k=-filterWidth/2;
int l=filterWidth/2;
int m=-filterWidth/2;
int n=filterWidth/2;
if(thread_2D_pos.y < filterWidth/2)
k=-thread_2D_pos.y;
if((thread_2D_pos.y+filterWidth/2)>(numRows-1))
l=numRows-1-thread_2D_pos.y;
if(thread_2D_pos.x < filterWidth/2)
m=-thread_2D_pos.x;
if((thread_2D_pos.x+filterWidth/2)>(numCols-1))
n=numCols-1-thread_2D_pos.x;
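// After the adjustments above, [k,l] and [m,n] cover only in-bounds neighbour offsets,
// so the yIdx/xIdx reads below never leave the image (border pixels simply use a
// truncated window instead of clamped reads).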
for(int r=k; r<=l;++r){
for(int c=m; c<=n;++c){
int yIdx=absolute_image_position_y+r;
int xIdx=absolute_image_position_x+c;
int idx=(yIdx)*numCols+xIdx;
float filter_value=filter[(r+filterWidth/2)*filterWidth+c+filterWidth/2];
sum+=filter_value*static_cast<float>(inputChannel[idx]);
/*if(thread_1D_pos==500){
printf("inputChannel[idx]=%f\t,filter_Value=%f\t,sum=%f\n",static_cast<float>(inputChannel[idx]),filter_value,sum) ;
}*/
}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_row( const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
float sum=0.0f;
int radius=filterWidth/2;
int i=-radius;
int j=radius;
if(thread_2D_pos.x < radius)
i=-thread_2D_pos.x;
if((thread_2D_pos.x+radius)>(numCols-1))
j=numCols-1-thread_2D_pos.x;
for(int c=i; c<=j;++c){
// for(int c=-filterWidth/2; c<=filterWidth/2;++c){
int xIdx=absolute_image_position_x+c;
int idx=(thread_2D_pos.y)*numCols+xIdx;
float filter_value=filter[c+radius];
sum+=filter_value*static_cast<float>(inputChannel[idx]);
//}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_col(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
float sum=0.0f;
int radius=filterWidth/2;
int i=-radius;
int j=radius;
if(thread_2D_pos.y < radius)
i=-thread_2D_pos.y;
if((thread_2D_pos.y+radius)>(numRows-1))
j=numRows-1-thread_2D_pos.y;
for(int r=i; r<=j;++r){
int yIdx=absolute_image_position_y+r;
int idx=(yIdx)*numCols+thread_2D_pos.x;
float filter_value=filter[r+radius];
sum+=filter_value*static_cast<float>(inputChannel[idx]);
//if(idx%10==0)
}
// fflush();
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
const int absolute_image_position_x = thread_2D_pos.x;
const int absolute_image_position_y = thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
//if(thread_1D_pos==500)
//printf("in kernel value.x=%f\t,value.y=%f\tvalue.z=%f",static_cast<float>(outputImageRGBA[500].x),static_cast<float>(outputImageRGBA[500].y),static_cast<float>(outputImageRGBA[500].z));
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof( float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth, hipMemcpyHostToDevice));
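// Note: only filterWidth floats are copied here (one row of the filter), which is all the
// separable gaussian_blur_row/gaussian_blur_col kernels index; the full 2-D gaussian_blur
// kernel would need filterWidth * filterWidth coefficients.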
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *grayscale,
const int filterWidth)
{
printf("here 1");
//TODO: Set reasonable block size (i.e., number of threads per block)
int tilesize=16;
const dim3 blockSize(tilesize, tilesize);
printf("here 2");
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize(numCols/blockSize.x+1,numRows/blockSize.y+1);
printf("here 3");
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
printf("here 4");
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
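// Separable blur as implemented: a horizontal pass writes d_red into `grayscale`, then a
// vertical pass writes the result back into d_red. The green/blue passes are left
// commented out below, so only the red channel is actually blurred here.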
hipLaunchKernelGGL(( gaussian_blur_row), dim3(gridSize), dim3(blockSize), 0, 0, d_red,
grayscale,
numRows,
numCols,
d_filter,
filterWidth);
printf("here 4");
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//gaussian_blur_row<<<gridSize, blockSize>>>(d_green,
// d_greenBlurred,
// numRows,
// numCols,
// d_filter,
// filterWidth);
// hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//gaussian_blur_row<<<gridSize, blockSize>>>(d_blue,
// d_blueBlurred,
// numRows,
// numCols,
// d_filter,
// filterWidth);
//hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur_col), dim3(gridSize), dim3(blockSize), 0, 0, grayscale,
d_red,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
printf("here 4");
//gaussian_blur_col<<<gridSize, blockSize>>>(d_greenBlurred,d_green,
// numRows,
// numCols,
// d_filter,
// filterWidth);
//hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// gaussian_blur_col<<<gridSize, blockSize>>>(d_blueBlurred,
/// d_blue,
// numRows,
// numCols,
// / d_filter,
// filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
//hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
printf("hererere");
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_red,
d_green,
d_blue,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
printf("cjcjjcjc");
}
| 9d60e6964300739bfd5c4507b4664a333f39fcc0.cu |
#include "utils.h"
#include <stdio.h>
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
float sum=0.0f;
int k=-filterWidth/2;
int l=filterWidth/2;
int m=-filterWidth/2;
int n=filterWidth/2;
if(thread_2D_pos.y < filterWidth/2)
k=-thread_2D_pos.y;
if((thread_2D_pos.y+filterWidth/2)>(numRows-1))
l=numRows-1-thread_2D_pos.y;
if(thread_2D_pos.x < filterWidth/2)
m=-thread_2D_pos.x;
if((thread_2D_pos.x+filterWidth/2)>(numCols-1))
n=numCols-1-thread_2D_pos.x;
for(int r=k; r<=l;++r){
for(int c=m; c<=n;++c){
int yIdx=absolute_image_position_y+r;
int xIdx=absolute_image_position_x+c;
int idx=(yIdx)*numCols+xIdx;
float filter_value=filter[(r+filterWidth/2)*filterWidth+c+filterWidth/2];
sum+=filter_value*static_cast<float>(inputChannel[idx]);
/*if(thread_1D_pos==500){
printf("inputChannel[idx]=%f\t,filter_Value=%f\t,sum=%f\n",static_cast<float>(inputChannel[idx]),filter_value,sum) ;
}*/
}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_row( const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
float sum=0.0f;
int radius=filterWidth/2;
int i=-radius;
int j=radius;
if(thread_2D_pos.x < radius)
i=-thread_2D_pos.x;
if((thread_2D_pos.x+radius)>(numCols-1))
j=numCols-1-thread_2D_pos.x;
for(int c=i; c<=j;++c){
// for(int c=-filterWidth/2; c<=filterWidth/2;++c){
int xIdx=absolute_image_position_x+c;
int idx=(thread_2D_pos.y)*numCols+xIdx;
float filter_value=filter[c+radius];
sum+=filter_value*static_cast<float>(inputChannel[idx]);
//}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_col(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
float sum=0.0f;
int radius=filterWidth/2;
int i=-radius;
int j=radius;
if(thread_2D_pos.y < radius)
i=-thread_2D_pos.y;
if((thread_2D_pos.y+radius)>(numRows-1))
j=numRows-1-thread_2D_pos.y;
for(int r=i; r<=j;++r){
int yIdx=absolute_image_position_y+r;
int idx=(yIdx)*numCols+thread_2D_pos.x;
float filter_value=filter[r+radius];
sum+=filter_value*static_cast<float>(inputChannel[idx]);
//if(idx%10==0)
}
// fflush();
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
const int absolute_image_position_x = thread_2D_pos.x;
const int absolute_image_position_y = thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
//if(thread_1D_pos==500)
//printf("in kernel value.x=%f\t,value.y=%f\tvalue.z=%f",static_cast<float>(outputImageRGBA[500].x),static_cast<float>(outputImageRGBA[500].y),static_cast<float>(outputImageRGBA[500].z));
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof( float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *grayscale,
const int filterWidth)
{
printf("here 1");
//TODO: Set reasonable block size (i.e., number of threads per block)
int tilesize=16;
const dim3 blockSize(tilesize, tilesize);
printf("here 2");
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize(numCols/blockSize.x+1,numRows/blockSize.y+1);
printf("here 3");
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
printf("here 4");
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur_row<<<gridSize, blockSize>>>(d_red,
grayscale,
numRows,
numCols,
d_filter,
filterWidth);
printf("here 4");
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//gaussian_blur_row<<<gridSize, blockSize>>>(d_green,
// d_greenBlurred,
// numRows,
// numCols,
// d_filter,
// filterWidth);
// cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//gaussian_blur_row<<<gridSize, blockSize>>>(d_blue,
// d_blueBlurred,
// numRows,
// numCols,
// d_filter,
// filterWidth);
//cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur_col<<<gridSize, blockSize>>>(grayscale,
d_red,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
printf("here 4");
//gaussian_blur_col<<<gridSize, blockSize>>>(d_greenBlurred,d_green,
// numRows,
// numCols,
// d_filter,
// filterWidth);
//cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// gaussian_blur_col<<<gridSize, blockSize>>>(d_blueBlurred,
/// d_blue,
// numRows,
// numCols,
// / d_filter,
// filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
//cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
printf("hererere");
recombineChannels<<<gridSize, blockSize>>>(d_red,
d_green,
d_blue,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
printf("cjcjjcjc");
}
|
2be1f1c5fd00ade7b06e2d0bacff3cfbe76fff6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "..\Prerequisites.h"
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
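// Expected inputs: a complex half-spectrum gpuArray whose leading dimension is nx/2 + 1,
// and the real-space dimensions [nx ny nz]; the output is the inverse C2R FFT of that size.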
if (nrhs != 2 || !mxIsComplex(prhs[0]) || (int)mxGetDimensions(prhs[0])[0] != (int)mxGetPr(prhs[1])[0] / 2 + 1)
mexErrMsgIdAndTxt(errId, errMsg);
mxArrayAdapter A(prhs[0]);
int ndims = mxGetNumberOfDimensions(A.underlyingarray);
if (ndims < 1 || ndims > 3)
mexErrMsgIdAndTxt(errId, errMsg);
int3 dimensions = MWDimsToInt3(ndims, mxGetPr(prhs[1]));
tfloat* d_result;
hipMalloc((void**)&d_result, dimensions.x * dimensions.y * dimensions.z * sizeof(tfloat));
d_IFFTC2R(A.GetAsManagedDeviceTComplex(), d_result, ndims, dimensions);
mwSize realdims[3] = { dimensions.x, dimensions.y, dimensions.z };
mxArrayAdapter B(mxCreateNumericArray(mxGetNumberOfDimensions(A.underlyingarray),
realdims,
mxGetClassID(A.underlyingarray),
mxREAL));
B.SetFromDeviceTFloat(d_result);
plhs[0] = B.underlyingarray;
hipFree(d_result);
} | 2be1f1c5fd00ade7b06e2d0bacff3cfbe76fff6f.cu | #include "..\Prerequisites.h"
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
if (nrhs != 2 || !mxIsComplex(prhs[0]) || (int)mxGetDimensions(prhs[0])[0] != (int)mxGetPr(prhs[1])[0] / 2 + 1)
mexErrMsgIdAndTxt(errId, errMsg);
mxArrayAdapter A(prhs[0]);
int ndims = mxGetNumberOfDimensions(A.underlyingarray);
if (ndims < 1 || ndims > 3)
mexErrMsgIdAndTxt(errId, errMsg);
int3 dimensions = MWDimsToInt3(ndims, mxGetPr(prhs[1]));
tfloat* d_result;
cudaMalloc((void**)&d_result, dimensions.x * dimensions.y * dimensions.z * sizeof(tfloat));
d_IFFTC2R(A.GetAsManagedDeviceTComplex(), d_result, ndims, dimensions);
mwSize realdims[3] = { dimensions.x, dimensions.y, dimensions.z };
mxArrayAdapter B(mxCreateNumericArray(mxGetNumberOfDimensions(A.underlyingarray),
realdims,
mxGetClassID(A.underlyingarray),
mxREAL));
B.SetFromDeviceTFloat(d_result);
plhs[0] = B.underlyingarray;
cudaFree(d_result);
} |
06f8579274cd9f6b30e7d0825d4e3763e8aefee4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../include/helper_cuda.h"
#include "../cuGMP.h"
__global__ void shiftRightKernel(__dev_mpz_struct *res, __dev_mpz_struct *a, unsigned int shift_limbs, unsigned int shift_bits, unsigned int result_limbs)
{
unsigned int i = THREAD_ID;
if (i < result_limbs)
{
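// Each output limb combines the high bits of source limb i+shift_limbs (shifted right)
// with the low bits carried down from limb i+shift_limbs+1; the most significant limb
// has no neighbour above it. This appears to assume shift_bits > 0, since a zero value
// would make (GMP_LIMB_BITS - shift_bits) a full-width shift.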
if (i < result_limbs - 1)
{
res->_mp_d[i] = (a->_mp_d[i + shift_limbs] >> shift_bits) + (a->_mp_d[i + shift_limbs + 1] << (GMP_LIMB_BITS - shift_bits));
}
else
{
res->_mp_d[i] = (a->_mp_d[i + shift_limbs] >> shift_bits);
}
}
}
void mpz_tdiv_q_2exp(mpz_ptr res, mpz_ptr a, unsigned long int exponent)
{
unsigned int result_limbs = ABS(a->_mp_size) - exponent / GMP_LIMB_BITS;
allocate_memory(res, result_limbs, result_limbs);
unsigned int shift_limbs = exponent / GMP_LIMB_BITS;
unsigned int shift_bits = exponent % GMP_LIMB_BITS;
#ifdef KERNEL_PRINT
printf("shiftRightKernel <<<%d, %d>>>\n", result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE);
#endif
shiftRightKernel << <result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE >> >
(res->_dev_mp_struct, a->_dev_mp_struct, shift_limbs, shift_bits, result_limbs);
getLastCudaError("Kernel execution failed: [ shiftRightKernel ]");
#ifdef EXPLICIT_SYNCHRONIZATION
checkCudaErrors(hipDeviceSynchronize());
#endif
// mp_size could have changed on the device - reflect on the host.
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyDeviceToHost);
// If the number shifted is negative, reflect it in the result.
if (a->_mp_size < 0)
{
res->_mp_size = -1 * ABS(res->_mp_size);
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyHostToDevice);
}
}
| 06f8579274cd9f6b30e7d0825d4e3763e8aefee4.cu | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../include/helper_cuda.h"
#include "../cuGMP.h"
__global__ void shiftRightKernel(__dev_mpz_struct *res, __dev_mpz_struct *a, unsigned int shift_limbs, unsigned int shift_bits, unsigned int result_limbs)
{
unsigned int i = THREAD_ID;
if (i < result_limbs)
{
if (i < result_limbs - 1)
{
res->_mp_d[i] = (a->_mp_d[i + shift_limbs] >> shift_bits) + (a->_mp_d[i + shift_limbs + 1] << (GMP_LIMB_BITS - shift_bits));
}
else
{
res->_mp_d[i] = (a->_mp_d[i + shift_limbs] >> shift_bits);
}
}
}
void mpz_tdiv_q_2exp(mpz_ptr res, mpz_ptr a, unsigned long int exponent)
{
unsigned int result_limbs = ABS(a->_mp_size) - exponent / GMP_LIMB_BITS;
allocate_memory(res, result_limbs, result_limbs);
unsigned int shift_limbs = exponent / GMP_LIMB_BITS;
unsigned int shift_bits = exponent % GMP_LIMB_BITS;
#ifdef KERNEL_PRINT
printf("shiftRightKernel <<<%d, %d>>>\n", result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE);
#endif
shiftRightKernel << <result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE >> >
(res->_dev_mp_struct, a->_dev_mp_struct, shift_limbs, shift_bits, result_limbs);
getLastCudaError("Kernel execution failed: [ shiftRightKernel ]");
#ifdef EXPLICIT_SYNCHRONIZATION
checkCudaErrors(cudaDeviceSynchronize());
#endif
// mp_size could have changed on the device - reflect on the host.
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyDeviceToHost);
// If the number shifted is negative, reflect it in the result.
if (a->_mp_size < 0)
{
res->_mp_size = -1 * ABS(res->_mp_size);
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyHostToDevice);
}
}
|
e01eb57cc5298df983f0707f9bb2e97b5bf1ef3b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
__global__ void fill_one(int* d_array, size_t length) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if ( index >= length ) {
return;
}
d_array[index] = 1;
}
#define BLOCK_SIZE 1024
__global__ void perfix_sum_simple(int* d_array, size_t length) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
printf("index %d length %d\n", (int)index, (int)length);
if ( index >= length ) {
return;
}
__shared__ int cache[BLOCK_SIZE];
cache[threadIdx.x] = d_array[index];
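// Naive Hillis-Steele style in-block inclusive scan: each round adds the partial sum
// `stride` slots to the left, doubling stride until it exceeds this thread's index, so
// cache[threadIdx.x] ends up holding the inclusive prefix sum within the block.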
for ( size_t stride = 1; stride <= threadIdx.x; stride *= 2 ) {
__syncthreads();
cache[threadIdx.x] += cache[threadIdx.x - stride];
}
// write back
d_array[index] = cache[threadIdx.x];
}
__global__ void perfix_sum( int* d_array, size_t block_size, size_t length) {
const int index = threadIdx.x + blockIdx.x * blockDim.x;
const int start = index * block_size;
// printf( "id %d index %d, start %d length %d block_size %d \n", threadIdx.x, index, start, (int)length, (int) block_size );
if ( start >= length ) {
return;
}
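// Strategy: each thread owns `block_size` consecutive elements and keeps a private copy.
// cache[] holds each thread's running block total; every scan round pulls in the total of
// the thread `stride` positions to the left and adds it to all of this thread's elements,
// producing an inclusive prefix sum over the whole CUDA block.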
__shared__ int cache[BLOCK_SIZE];
int local_copy[BLOCK_SIZE];
for ( size_t i = 0; i < block_size; ++i ) {
local_copy[i] = d_array[ start + i ];
// printf("id %d, local_copy[%d] = d_array[%d] = %d \n", threadIdx.x, (int)i, (int)(start+i), local_copy[i]);
}
cache[threadIdx.x] = local_copy[block_size-1];
for ( size_t stride = 1; stride <= threadIdx.x; stride *= 2 ) {
// printf("id %d, cache[%d] = local_copy[%d] = %d \n", threadIdx.x, threadIdx.x, (int)(block_size - 1), local_copy[block_size-1]);
__syncthreads();
int operend = cache[threadIdx.x-stride];
// printf("id %d, stride %d operend %d \n", threadIdx.x, stride, operend);
for ( size_t i = 0; i < block_size; ++i ) {
local_copy[i] += operend;
}
__syncthreads();
cache[threadIdx.x] = local_copy[block_size-1];
}
// write back
for ( size_t i = 0; i < block_size; ++i ) {
d_array[ start + i ] = local_copy[i];
}
}
#define BLOCK_NUM 256
int main(int argc, char** argv) {
int* d_array = NULL;
checkCudaErrors(hipMalloc(&d_array, sizeof(int) * BLOCK_SIZE * BLOCK_NUM));
hipLaunchKernelGGL(( fill_one), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, d_array, BLOCK_SIZE * BLOCK_NUM);
hipLaunchKernelGGL(( perfix_sum), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, d_array, 1, BLOCK_SIZE * BLOCK_NUM);
hipDeviceSynchronize();
int h_array[BLOCK_NUM*BLOCK_SIZE] = {0};
checkCudaErrors(hipMemcpy(h_array, d_array, sizeof(int) * BLOCK_SIZE * BLOCK_NUM, hipMemcpyDeviceToHost));
for ( size_t i = 0; i < BLOCK_NUM * BLOCK_SIZE; ++i ) {
std::cout << h_array[i] << " ";
if ( (i % BLOCK_SIZE) == (BLOCK_SIZE-1) ) {
std::cout << std::endl;
}
}
checkCudaErrors(hipFree(d_array));
}
| e01eb57cc5298df983f0707f9bb2e97b5bf1ef3b.cu | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
__global__ void fill_one(int* d_array, size_t length) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if ( index >= length ) {
return;
}
d_array[index] = 1;
}
#define BLOCK_SIZE 1024
__global__ void perfix_sum_simple(int* d_array, size_t length) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
printf("index %d length %d\n", (int)index, (int)length);
if ( index >= length ) {
return;
}
__shared__ int cache[BLOCK_SIZE];
cache[threadIdx.x] = d_array[index];
for ( size_t stride = 1; stride <= threadIdx.x; stride *= 2 ) {
__syncthreads();
cache[threadIdx.x] += cache[threadIdx.x - stride];
}
// write back
d_array[index] = cache[threadIdx.x];
}
__global__ void perfix_sum( int* d_array, size_t block_size, size_t length) {
const int index = threadIdx.x + blockIdx.x * blockDim.x;
const int start = index * block_size;
// printf( "id %d index %d, start %d length %d block_size %d \n", threadIdx.x, index, start, (int)length, (int) block_size );
if ( start >= length ) {
return;
}
__shared__ int cache[BLOCK_SIZE];
int local_copy[BLOCK_SIZE];
for ( size_t i = 0; i < block_size; ++i ) {
local_copy[i] = d_array[ start + i ];
// printf("id %d, local_copy[%d] = d_array[%d] = %d \n", threadIdx.x, (int)i, (int)(start+i), local_copy[i]);
}
cache[threadIdx.x] = local_copy[block_size-1];
for ( size_t stride = 1; stride <= threadIdx.x; stride *= 2 ) {
// printf("id %d, cache[%d] = local_copy[%d] = %d \n", threadIdx.x, threadIdx.x, (int)(block_size - 1), local_copy[block_size-1]);
__syncthreads();
int operend = cache[threadIdx.x-stride];
// printf("id %d, stride %d operend %d \n", threadIdx.x, stride, operend);
for ( size_t i = 0; i < block_size; ++i ) {
local_copy[i] += operend;
}
__syncthreads();
cache[threadIdx.x] = local_copy[block_size-1];
}
// write back
for ( size_t i = 0; i < block_size; ++i ) {
d_array[ start + i ] = local_copy[i];
}
}
#define BLOCK_NUM 256
int main(int argc, char** argv) {
int* d_array = NULL;
checkCudaErrors(cudaMalloc(&d_array, sizeof(int) * BLOCK_SIZE * BLOCK_NUM));
fill_one<<<BLOCK_NUM, BLOCK_SIZE>>>(d_array, BLOCK_SIZE * BLOCK_NUM);
perfix_sum<<<BLOCK_NUM, BLOCK_SIZE>>>(d_array, 1, BLOCK_SIZE * BLOCK_NUM);
cudaDeviceSynchronize();
int h_array[BLOCK_NUM*BLOCK_SIZE] = {0};
checkCudaErrors(cudaMemcpy(h_array, d_array, sizeof(int) * BLOCK_SIZE * BLOCK_NUM, cudaMemcpyDeviceToHost));
for ( size_t i = 0; i < BLOCK_NUM * BLOCK_SIZE; ++i ) {
std::cout << h_array[i] << " ";
if ( (i % BLOCK_SIZE) == (BLOCK_SIZE-1) ) {
std::cout << std::endl;
}
}
checkCudaErrors(cudaFree(d_array));
}
|
75339c433fe3f3608c946277a7a358d6c5a5a09a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include <hipcub/hipcub.hpp>
#include <raft/cuda_utils.cuh>
namespace ML {
namespace SVM {
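// Marks the rows already chosen into the working set (listed in idx) as unavailable for
// the next selection pass; one thread per selected index.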
__global__ void set_unavailable(bool* available, int n_rows, const int* idx, int n_selected)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n_selected) { available[idx[tid]] = false; }
}
__global__ void update_priority(int* new_priority,
int n_selected,
const int* new_idx,
int n_ws,
const int* idx,
const int* priority)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n_selected) {
int my_new_idx = new_idx[tid];
// The working set size is limited (~1024 elements) so we just loop through it
for (int i = 0; i < n_ws; i++) {
if (idx[i] == my_new_idx) new_priority[tid] = priority[i] + 1;
}
}
}
} // namespace SVM
} // namespace ML
| 75339c433fe3f3608c946277a7a358d6c5a5a09a.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include <cub/cub.cuh>
#include <raft/cuda_utils.cuh>
namespace ML {
namespace SVM {
__global__ void set_unavailable(bool* available, int n_rows, const int* idx, int n_selected)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n_selected) { available[idx[tid]] = false; }
}
__global__ void update_priority(int* new_priority,
int n_selected,
const int* new_idx,
int n_ws,
const int* idx,
const int* priority)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n_selected) {
int my_new_idx = new_idx[tid];
// The working set size is limited (~1024 elements) so we just loop through it
for (int i = 0; i < n_ws; i++) {
if (idx[i] == my_new_idx) new_priority[tid] = priority[i] + 1;
}
}
}
} // namespace SVM
} // namespace ML
|
982b698292604a369db5c79cd7f1f9e001894c1d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "cuda_utils.h"
#include "linalg/svd.h"
#include "matrix/matrix.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct SvdInputs {
T tolerance;
int len;
int n_row;
int n_col;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const SvdInputs<T> &dims) {
return os;
}
template <typename T>
class SvdTest : public ::testing::TestWithParam<SvdInputs<T>> {
protected:
void SetUp() override {
CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH));
CUBLAS_CHECK(hipblasCreate(&cublasH));
params = ::testing::TestWithParam<SvdInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(data, len);
ASSERT(params.n_row == 3, "This test only supports nrows=3!");
ASSERT(params.len == 6, "This test only supports len=6!");
T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
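// 3x2 input in column-major order (as cuSOLVER expects): columns (1,4,2) and (2,5,1).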
updateDevice(data, data_h, len, stream);
int left_evl = params.n_row * params.n_col;
int right_evl = params.n_col * params.n_col;
allocate(left_eig_vectors_qr, left_evl);
allocate(right_eig_vectors_trans_qr, right_evl);
allocate(sing_vals_qr, params.n_col);
// allocate(left_eig_vectors_jacobi, left_evl);
// allocate(right_eig_vectors_trans_jacobi, right_evl);
// allocate(sing_vals_jacobi, params.n_col);
T left_eig_vectors_ref_h[] = {-0.308219, -0.906133, -0.289695,
0.488195, 0.110706, -0.865685};
T right_eig_vectors_ref_h[] = {-0.638636, -0.769509, -0.769509, 0.638636};
T sing_vals_ref_h[] = {7.065283, 1.040081};
allocate(left_eig_vectors_ref, left_evl);
allocate(right_eig_vectors_ref, right_evl);
allocate(sing_vals_ref, params.n_col);
updateDevice(left_eig_vectors_ref, left_eig_vectors_ref_h, left_evl, stream);
updateDevice(right_eig_vectors_ref, right_eig_vectors_ref_h, right_evl, stream);
updateDevice(sing_vals_ref, sing_vals_ref_h, params.n_col, stream);
auto mgr = makeDefaultAllocator();
svdQR(data, params.n_row, params.n_col, sing_vals_qr, left_eig_vectors_qr,
right_eig_vectors_trans_qr, true, true, true, cusolverH, cublasH, stream, mgr);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(left_eig_vectors_qr));
CUDA_CHECK(hipFree(right_eig_vectors_trans_qr));
CUDA_CHECK(hipFree(sing_vals_qr));
CUDA_CHECK(hipFree(left_eig_vectors_ref));
CUDA_CHECK(hipFree(right_eig_vectors_ref));
CUDA_CHECK(hipFree(sing_vals_ref));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH));
CUBLAS_CHECK(hipblasDestroy(cublasH));
}
protected:
SvdInputs<T> params;
T *data, *left_eig_vectors_qr, *right_eig_vectors_trans_qr, *sing_vals_qr,
*left_eig_vectors_ref, *right_eig_vectors_ref, *sing_vals_ref;
hipsolverDnHandle_t cusolverH = NULL;
hipblasHandle_t cublasH;
};
const std::vector<SvdInputs<float>> inputsf2 = {
{0.00001f, 3 * 2, 3, 2, 1234ULL}};
const std::vector<SvdInputs<double>> inputsd2 = {
{0.00001, 3 * 2, 3, 2, 1234ULL}};
typedef SvdTest<float> SvdTestValF;
TEST_P(SvdTestValF, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, sing_vals_qr, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SvdTest<double> SvdTestValD;
TEST_P(SvdTestValD, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, sing_vals_qr, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef SvdTest<float> SvdTestLeftVecF;
TEST_P(SvdTestLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, left_eig_vectors_qr,
params.n_row * params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SvdTest<double> SvdTestLeftVecD;
TEST_P(SvdTestLeftVecD, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, left_eig_vectors_qr,
params.n_row * params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef SvdTest<float> SvdTestRightVecF;
TEST_P(SvdTestRightVecF, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, right_eig_vectors_trans_qr,
params.n_col * params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SvdTest<double> SvdTestRightVecD;
TEST_P(SvdTestRightVecD, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, right_eig_vectors_trans_qr,
params.n_col * params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestLeftVecF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestLeftVecD,
::testing::ValuesIn(inputsd2));
// INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestRightVecF,
// ::testing::ValuesIn(inputsf2));
// INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestRightVecD,
//::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
| 982b698292604a369db5c79cd7f1f9e001894c1d.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "cuda_utils.h"
#include "linalg/svd.h"
#include "matrix/matrix.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct SvdInputs {
T tolerance;
int len;
int n_row;
int n_col;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const SvdInputs<T> &dims) {
return os;
}
template <typename T>
class SvdTest : public ::testing::TestWithParam<SvdInputs<T>> {
protected:
void SetUp() override {
CUSOLVER_CHECK(cusolverDnCreate(&cusolverH));
CUBLAS_CHECK(cublasCreate(&cublasH));
params = ::testing::TestWithParam<SvdInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(data, len);
ASSERT(params.n_row == 3, "This test only supports nrows=3!");
ASSERT(params.len == 6, "This test only supports len=6!");
T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
updateDevice(data, data_h, len, stream);
int left_evl = params.n_row * params.n_col;
int right_evl = params.n_col * params.n_col;
allocate(left_eig_vectors_qr, left_evl);
allocate(right_eig_vectors_trans_qr, right_evl);
allocate(sing_vals_qr, params.n_col);
// allocate(left_eig_vectors_jacobi, left_evl);
// allocate(right_eig_vectors_trans_jacobi, right_evl);
// allocate(sing_vals_jacobi, params.n_col);
T left_eig_vectors_ref_h[] = {-0.308219, -0.906133, -0.289695,
0.488195, 0.110706, -0.865685};
T right_eig_vectors_ref_h[] = {-0.638636, -0.769509, -0.769509, 0.638636};
T sing_vals_ref_h[] = {7.065283, 1.040081};
allocate(left_eig_vectors_ref, left_evl);
allocate(right_eig_vectors_ref, right_evl);
allocate(sing_vals_ref, params.n_col);
updateDevice(left_eig_vectors_ref, left_eig_vectors_ref_h, left_evl, stream);
updateDevice(right_eig_vectors_ref, right_eig_vectors_ref_h, right_evl, stream);
updateDevice(sing_vals_ref, sing_vals_ref_h, params.n_col, stream);
auto mgr = makeDefaultAllocator();
svdQR(data, params.n_row, params.n_col, sing_vals_qr, left_eig_vectors_qr,
right_eig_vectors_trans_qr, true, true, true, cusolverH, cublasH, stream, mgr);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(left_eig_vectors_qr));
CUDA_CHECK(cudaFree(right_eig_vectors_trans_qr));
CUDA_CHECK(cudaFree(sing_vals_qr));
CUDA_CHECK(cudaFree(left_eig_vectors_ref));
CUDA_CHECK(cudaFree(right_eig_vectors_ref));
CUDA_CHECK(cudaFree(sing_vals_ref));
CUSOLVER_CHECK(cusolverDnDestroy(cusolverH));
CUBLAS_CHECK(cublasDestroy(cublasH));
}
protected:
SvdInputs<T> params;
T *data, *left_eig_vectors_qr, *right_eig_vectors_trans_qr, *sing_vals_qr,
*left_eig_vectors_ref, *right_eig_vectors_ref, *sing_vals_ref;
cusolverDnHandle_t cusolverH = NULL;
cublasHandle_t cublasH;
};
const std::vector<SvdInputs<float>> inputsf2 = {
{0.00001f, 3 * 2, 3, 2, 1234ULL}};
const std::vector<SvdInputs<double>> inputsd2 = {
{0.00001, 3 * 2, 3, 2, 1234ULL}};
typedef SvdTest<float> SvdTestValF;
TEST_P(SvdTestValF, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, sing_vals_qr, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SvdTest<double> SvdTestValD;
TEST_P(SvdTestValD, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, sing_vals_qr, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef SvdTest<float> SvdTestLeftVecF;
TEST_P(SvdTestLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, left_eig_vectors_qr,
params.n_row * params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SvdTest<double> SvdTestLeftVecD;
TEST_P(SvdTestLeftVecD, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, left_eig_vectors_qr,
params.n_row * params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef SvdTest<float> SvdTestRightVecF;
TEST_P(SvdTestRightVecF, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, right_eig_vectors_trans_qr,
params.n_col * params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef SvdTest<double> SvdTestRightVecD;
TEST_P(SvdTestRightVecD, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, right_eig_vectors_trans_qr,
params.n_col * params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestLeftVecF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestLeftVecD,
::testing::ValuesIn(inputsd2));
// INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestRightVecF,
// ::testing::ValuesIn(inputsf2));
// INSTANTIATE_TEST_CASE_P(SvdTests, SvdTestRightVecD,
//::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
b1d1b61c6f3d9477b92c1041f7dba084d3b2ceca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__
void cudaGrayScale(float* R, float* G, float* B, float* gray, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
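// Luma-style weighted combination of the channels (0.21 R + 0.71 G + 0.07 B); the extra
// division by 350 is an additional rescaling specific to this application.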
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n) {
int size = n * sizeof(float);
float* d_R, * d_G, * d_B, * d_gray;
hipMalloc((void**)&d_R, size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_G, size);
hipMemcpy(d_G, G, size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_gray, size);
cudaGrayScale << <ceil(n / 1024.0), 1024 >> > (d_R, d_G, d_B, d_gray, n);
hipMemcpy(grayscale, d_gray, size, hipMemcpyDeviceToHost);
hipFree(d_R);
hipFree(d_G);
hipFree(d_B);
hipFree(d_gray);
} | b1d1b61c6f3d9477b92c1041f7dba084d3b2ceca.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__
void cudaGrayScale(float* R, float* G, float* B, float* gray, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n) {
int size = n * sizeof(float);
float* d_R, * d_G, * d_B, * d_gray;
cudaMalloc((void**)&d_R, size);
cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_G, size);
cudaMemcpy(d_G, G, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_gray, size);
cudaGrayScale << <ceil(n / 1024.0), 1024 >> > (d_R, d_G, d_B, d_gray, n);
cudaMemcpy(grayscale, d_gray, size, cudaMemcpyDeviceToHost);
cudaFree(d_R);
cudaFree(d_G);
cudaFree(d_B);
cudaFree(d_gray);
} |
664f536dd5a5d1b43877b334088fb1fac255e36b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements Mersenne Twister random number generator
* and Cartesian Box-Muller transformation on the GPU.
* See supplied whitepaper for more explanations.
*/
// Utilities and system includes
#include <shrUtils.h>
#include <cutil_inline.h>
#include "MersenneTwister.h"
///////////////////////////////////////////////////////////////////////////////
// Common host and device function
///////////////////////////////////////////////////////////////////////////////
//ceil(a / b)
extern "C" int iDivUp(int a, int b){
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
//floor(a / b)
extern "C" int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
extern "C" int iAlignUp(int a, int b){
return ((a % b) != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
extern "C" int iAlignDown(int a, int b){
return a - a % b;
}
///////////////////////////////////////////////////////////////////////////////
// Reference MT front-end and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
extern "C" void initMTRef(const char *fname);
extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed);
extern "C" void BoxMullerRef(float *h_Random, int NPerRng);
///////////////////////////////////////////////////////////////////////////////
// Fast GPU random number generator and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
#include "MersenneTwister_kernel.cu"
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
const int PATH_N = 24000000;
const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2);
const int RAND_N = MT_RNG_COUNT * N_PER_RNG;
const unsigned int SEED = 777;
#define DO_BOXMULLER
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
shrSetLogFileName ("MersenneTwister.txt");
shrLog("%s Starting...\n\n", argv[0]);
float
*d_Rand;
float
*h_RandCPU,
*h_RandGPU;
double
rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime;
int i, j;
unsigned int hTimer;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
shrLog("Initializing data for %i samples...\n", PATH_N);
h_RandCPU = (float *)malloc(RAND_N * sizeof(float));
h_RandGPU = (float *)malloc(RAND_N * sizeof(float));
cutilSafeCall( hipMalloc((void **)&d_Rand, RAND_N * sizeof(float)) );
shrLog("Loading CPU and GPU twisters configurations...\n");
const char *raw_path = shrFindFilePath("MersenneTwister.raw", argv[0]);
const char *dat_path = shrFindFilePath("MersenneTwister.dat", argv[0]);
initMTRef(raw_path);
loadMTGPU(dat_path);
seedMTGPU(SEED);
shrLog("Generating random numbers on GPU...\n\n");
int numIterations = 100;
for (i = -1; i < numIterations; i++)
{
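// Iteration i == -1 is a warm-up launch; the timer is reset and started once i reaches 0.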
if (i == 0)
{
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
}
hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG);
cutilCheckMsg("RandomGPU() execution failed\n");
#ifdef DO_BOXMULLER
hipLaunchKernelGGL(( BoxMullerGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG);
cutilCheckMsg("BoxMullerGPU() execution failed\n");
#endif
}
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = 1.0e-3 * cutGetTimerValue(hTimer)/(double)numIterations;
shrLogEx(LOGBOTH | MASTER, 0, "MersenneTwister, Throughput = %.4f GNumbers/s, Time = %.5f s, Size = %u Numbers, NumDevsUsed = %u, Workgroup = %u\n",
1.0e-9 * RAND_N / gpuTime, gpuTime, RAND_N, 1, 128);
shrLog("\nReading back the results...\n");
cutilSafeCall( hipMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), hipMemcpyDeviceToHost) );
shrLog("Checking GPU results...\n");
shrLog(" ...generating random numbers on CPU using reference generator\n");
RandomRef(h_RandCPU, N_PER_RNG, SEED);
#ifdef DO_BOXMULLER
shrLog(" ...applying Box-Muller transformation on CPU\n");
BoxMullerRef(h_RandCPU, N_PER_RNG);
#endif
shrLog(" ...comparing the results\n\n");
max_delta = 0;
sum_delta = 0;
sum_ref = 0;
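// The CPU reference stores each RNG's outputs contiguously, while the GPU kernel writes
// them interleaved (one value per RNG, stride MT_RNG_COUNT), hence the transposed
// indexing of h_RandGPU below.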
for(i = 0; i < MT_RNG_COUNT; i++)
for(j = 0; j < N_PER_RNG; j++){
rCPU = h_RandCPU[i * N_PER_RNG + j];
rGPU = h_RandGPU[i + j * MT_RNG_COUNT];
delta = fabs(rCPU - rGPU);
sum_delta += delta;
sum_ref += fabs(rCPU);
if(delta >= max_delta) max_delta = delta;
}
L1norm = (float)(sum_delta / sum_ref);
shrLog("Max absolute error: %E\n", max_delta);
shrLog("L1 norm: %E\n\n", L1norm);
shrLog((L1norm < 1e-6) ? "PASSED\n\n" : "FAILED\n\n");
shrLog("Shutting down...\n");
cutilSafeCall( hipFree(d_Rand) );
free(h_RandGPU);
free(h_RandCPU);
cutilCheckError( cutDeleteTimer( hTimer) );
hipDeviceReset();
shrEXIT(argc, (const char**)argv);
}
| 664f536dd5a5d1b43877b334088fb1fac255e36b.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements Mersenne Twister random number generator
* and Cartesian Box-Muller transformation on the GPU.
* See supplied whitepaper for more explanations.
*/
// Utilities and system includes
#include <shrUtils.h>
#include <cutil_inline.h>
#include "MersenneTwister.h"
///////////////////////////////////////////////////////////////////////////////
// Common host and device function
///////////////////////////////////////////////////////////////////////////////
//ceil(a / b)
extern "C" int iDivUp(int a, int b){
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
//floor(a / b)
extern "C" int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
extern "C" int iAlignUp(int a, int b){
return ((a % b) != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
extern "C" int iAlignDown(int a, int b){
return a - a % b;
}
///////////////////////////////////////////////////////////////////////////////
// Reference MT front-end and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
extern "C" void initMTRef(const char *fname);
extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed);
extern "C" void BoxMullerRef(float *h_Random, int NPerRng);
///////////////////////////////////////////////////////////////////////////////
// Fast GPU random number generator and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
#include "MersenneTwister_kernel.cu"
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
const int PATH_N = 24000000;
const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2);
const int RAND_N = MT_RNG_COUNT * N_PER_RNG;
const unsigned int SEED = 777;
#define DO_BOXMULLER
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
shrSetLogFileName ("MersenneTwister.txt");
shrLog("%s Starting...\n\n", argv[0]);
float
*d_Rand;
float
*h_RandCPU,
*h_RandGPU;
double
rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime;
int i, j;
unsigned int hTimer;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
shrLog("Initializing data for %i samples...\n", PATH_N);
h_RandCPU = (float *)malloc(RAND_N * sizeof(float));
h_RandGPU = (float *)malloc(RAND_N * sizeof(float));
cutilSafeCall( cudaMalloc((void **)&d_Rand, RAND_N * sizeof(float)) );
shrLog("Loading CPU and GPU twisters configurations...\n");
const char *raw_path = shrFindFilePath("MersenneTwister.raw", argv[0]);
const char *dat_path = shrFindFilePath("MersenneTwister.dat", argv[0]);
initMTRef(raw_path);
loadMTGPU(dat_path);
seedMTGPU(SEED);
shrLog("Generating random numbers on GPU...\n\n");
int numIterations = 100;
for (i = -1; i < numIterations; i++)
{
if (i == 0)
{
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
}
RandomGPU<<<32, 128>>>(d_Rand, N_PER_RNG);
cutilCheckMsg("RandomGPU() execution failed\n");
#ifdef DO_BOXMULLER
BoxMullerGPU<<<32, 128>>>(d_Rand, N_PER_RNG);
cutilCheckMsg("BoxMullerGPU() execution failed\n");
#endif
}
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = 1.0e-3 * cutGetTimerValue(hTimer)/(double)numIterations;
shrLogEx(LOGBOTH | MASTER, 0, "MersenneTwister, Throughput = %.4f GNumbers/s, Time = %.5f s, Size = %u Numbers, NumDevsUsed = %u, Workgroup = %u\n",
1.0e-9 * RAND_N / gpuTime, gpuTime, RAND_N, 1, 128);
shrLog("\nReading back the results...\n");
cutilSafeCall( cudaMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), cudaMemcpyDeviceToHost) );
shrLog("Checking GPU results...\n");
shrLog(" ...generating random numbers on CPU using reference generator\n");
RandomRef(h_RandCPU, N_PER_RNG, SEED);
#ifdef DO_BOXMULLER
shrLog(" ...applying Box-Muller transformation on CPU\n");
BoxMullerRef(h_RandCPU, N_PER_RNG);
#endif
shrLog(" ...comparing the results\n\n");
max_delta = 0;
sum_delta = 0;
sum_ref = 0;
for(i = 0; i < MT_RNG_COUNT; i++)
for(j = 0; j < N_PER_RNG; j++){
rCPU = h_RandCPU[i * N_PER_RNG + j];
rGPU = h_RandGPU[i + j * MT_RNG_COUNT];
delta = fabs(rCPU - rGPU);
sum_delta += delta;
sum_ref += fabs(rCPU);
if(delta >= max_delta) max_delta = delta;
}
L1norm = (float)(sum_delta / sum_ref);
shrLog("Max absolute error: %E\n", max_delta);
shrLog("L1 norm: %E\n\n", L1norm);
shrLog((L1norm < 1e-6) ? "PASSED\n\n" : "FAILED\n\n");
shrLog("Shutting down...\n");
cutilSafeCall( cudaFree(d_Rand) );
free(h_RandGPU);
free(h_RandCPU);
cutilCheckError( cutDeleteTimer( hTimer) );
cudaThreadExit();
shrEXIT(argc, (const char**)argv);
}
|
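/*
 * Editorial sketch (not part of the dataset row above): the systematic
 * difference between the paired MersenneTwister files is the kernel launch
 * syntax plus a few runtime renames performed by hipify, as shown above
 * (cudaThreadSynchronize -> hipDeviceSynchronize, cudaThreadExit ->
 * hipDeviceReset, cudaMalloc/cudaMemcpy/cudaFree -> hipMalloc/hipMemcpy/hipFree).
 * The kernel and variable names below are illustrative only.
 */
__global__ void scale_example(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // one element per thread
    if (i < n) x[i] *= a;
}
// CUDA launch syntax: scale_example<<<dim3(32), dim3(128), 0, 0>>>(d_x, 2.0f, n);
// HIP equivalent: hipLaunchKernelGGL(scale_example, dim3(32), dim3(128), 0, 0, d_x, 2.0f, n);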
9ac325d6b00ae98d8a3621430d9ed6648a0c1513.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// libraries
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <cutil_inline.h>
#include "cache_kernel.h"
/* ================================================================== */
/* GPU kernel error checking function */
void gpu_check_error__srcpos (FILE* fp, const char* filename, size_t line)
{
hipError_t C_E = hipGetLastError ();
if (C_E) {
fprintf (fp, "*** [%s:%lu] CUDA ERROR %d: %s ***\n", filename, line, C_E,
hipGetErrorString (C_E));
fflush (fp);
exit (-1); /* abort program */
}
}
/* ================================================================== */
/* ================================================================== */
/* Read program input */
void readConfig(int* wordsPerThread, int* nThreads, int* bSize, char** argv)
{
*wordsPerThread = atoi (argv[1]);
*nThreads = atoi (argv[2]);
*bSize = atoi (argv[3]);
}
/* ================================================================== */
/* ================================================================== */
/* Validates the output to make sure the kernel ran correctly */
int validateResults(int nThreads, int bSize, int wordsPerThread, int* out, int* in)
{
int i, j;
int tmp, cnt;
int* test;
test = (int*) malloc (nThreads * sizeof (int));
for(i = 0; i < nThreads; i++) {
test[i] = 0;
}
for(i = 0; i < nThreads; i++) {
tmp = in[i % bSize];
for(j = 0; j < wordsPerThread; j++) {
tmp = in[tmp];
}
test[i] = tmp;
}
cnt = 0;
for(i = 0; i < nThreads; i++) {
if(test[i] != out[i]) cnt++;
}
return cnt;
}
/* ================================================================== */
/* ================================================================== */
/* Used to clear the cache before the main test */
__global__ void clear_cache (int *in)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
in[tid] = in[tid] + 1;
}
/* ================================================================== */
/* ================================================================== */
/* Kernel execution function */
float runTest (int* h_out, int* d_out, int* d_pchase, int nThreads, int bSize, int wordsPerThread)
{
hipEvent_t start, stop;
float total_time_taken;
int num_blocks = (nThreads + bSize - 1) / bSize;
dim3 grid (num_blocks);
dim3 threads (bSize);
fprintf (stderr, "number of iterations is %d\n", NUM_ITER);
/* Start timer */
cutilSafeCall (hipEventCreate (&start));
cutilSafeCall (hipEventCreate (&stop));
cutilSafeCall (hipEventRecord (start, 0));
for(int iter = 0; iter < NUM_ITER; iter++) {
switch (wordsPerThread) {
case 1:
hipLaunchKernelGGL(( cache_kernel_1) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 2:
hipLaunchKernelGGL(( cache_kernel_2) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 4:
hipLaunchKernelGGL(( cache_kernel_4) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 8:
hipLaunchKernelGGL(( cache_kernel_8) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 16:
hipLaunchKernelGGL(( cache_kernel_16) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 32:
hipLaunchKernelGGL(( cache_kernel_32) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 64:
hipLaunchKernelGGL(( cache_kernel_64) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 128:
hipLaunchKernelGGL(( cache_kernel_128) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 256:
hipLaunchKernelGGL(( cache_kernel_256) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 512:
hipLaunchKernelGGL(( cache_kernel_512) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 1024:
hipLaunchKernelGGL(( cache_kernel_1024) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 2048:
hipLaunchKernelGGL(( cache_kernel_2048) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 4096:
hipLaunchKernelGGL(( cache_kernel_4096), dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
default:
fprintf(stderr, "Invalid wordsPerThread: %d\n", wordsPerThread);
total_time_taken = -1.0f;
break;
}
}
/* End timer */
cutilSafeCall (hipEventRecord (stop, 0));
cutilSafeCall (hipDeviceSynchronize ());
cutilSafeCall (hipEventElapsedTime (&total_time_taken, start, stop));
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
/* Copy results back to host */
cutilSafeCall (hipMemcpy (h_out, d_out, nThreads * sizeof (int),
hipMemcpyDeviceToHost));
return total_time_taken;
}
/* ================================================================== */
int main(int argc, char** argv)
{
int i;
long double total_dram;
/* Timer */
float total_time_taken;
/* Execution parameters */
int wordsPerThread;
int nThreads;
int bSize;
/* Data structures */
int* d_out;
int* h_out;
int* h_pchase;
int* d_pchase;
/* Read program parameters */
if(argc < 4) {
fprintf(stderr, "usage: %s <word/thread> <# threads> <threads/block>\n",
argv[0]);
exit (0);
} else {
readConfig (&wordsPerThread, &nThreads, &bSize, argv);
}
/* Find the best GPU in the system */
hipSetDevice(cutGetMaxGflopsDeviceId ());
/* Clear the cache before starting the test */
/* For the shared memory test this isn't really necessary but we do it
just in case
*/
{
int cache_clear = 4 * 1024 * 1024; /* 4 MB */
int* h_temp = (int*) malloc (cache_clear);
for(int i = 0; i < cache_clear / sizeof (int); i++) h_temp[i] = 1;
int* d_temp;
cutilSafeCall (hipMalloc ((void**) &d_temp, cache_clear));
cutilSafeCall (hipMemcpy (d_temp, h_temp, cache_clear,
hipMemcpyHostToDevice));
int nt = cache_clear / sizeof (int);
int nb = nt / 512;
hipLaunchKernelGGL(( clear_cache) , dim3(nb), dim3(512), 0, 0, d_temp);
cutilSafeCall (hipDeviceSynchronize ());
free (h_temp);
cutilSafeCall (hipFree (d_temp));
}
/* Allocate memory */
h_out = (int*) malloc (nThreads * sizeof (int));
h_pchase = (int*) malloc (bSize * sizeof (int));
cutilSafeCall (hipMalloc ((void**) &d_out, nThreads * sizeof (int)));
cutilSafeCall (hipMalloc ((void**) &d_pchase, bSize * sizeof (int)));
/* Initialize memory */
for(i = 0; i < bSize - 1; i++) {
h_pchase[i] = i;
}
h_pchase[bSize - 1] = bSize - 1;
/* Copy memory to device */
cutilSafeCall (hipMemcpy (d_pchase, h_pchase, bSize * sizeof (int),
hipMemcpyHostToDevice));
/* Execute the kernel */
total_time_taken = runTest (h_out, d_out, d_pchase, nThreads, bSize,
wordsPerThread);
/* Validate results */
/* This will take some time so you may want to turn it off when doing
multiple tests */
fprintf(stderr, "Results validated: %d errors\n", validateResults (nThreads,
bSize,
wordsPerThread,
h_out, h_pchase));
/* Print performance statistics */
total_time_taken = total_time_taken / NUM_ITER;
total_dram = ((wordsPerThread + 1) * (1.0 * nThreads/1e9)) * sizeof (int);
fprintf (stderr, "Time taken to load %Lg GBs: %f (ms)\n", total_dram,
total_time_taken);
fprintf (stderr, "Effective bandwidth: %Lg (GB/s)\n",
(total_dram/total_time_taken * 1e3));
/* Free memory */
free (h_out);
cutilSafeCall (hipFree (d_out));
free (h_pchase);
cutilSafeCall (hipFree (d_pchase));
return 0;
}
| 9ac325d6b00ae98d8a3621430d9ed6648a0c1513.cu | // libraries
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <cutil_inline.h>
#include "cache_kernel.h"
/* ================================================================== */
/* GPU kernel error checking function */
void gpu_check_error__srcpos (FILE* fp, const char* filename, size_t line)
{
cudaError_t C_E = cudaGetLastError ();
if (C_E) {
fprintf (fp, "*** [%s:%lu] CUDA ERROR %d: %s ***\n", filename, line, C_E,
cudaGetErrorString (C_E));
fflush (fp);
exit (-1); /* abort program */
}
}
/* ================================================================== */
/* ================================================================== */
/* Read program input */
void readConfig(int* wordsPerThread, int* nThreads, int* bSize, char** argv)
{
*wordsPerThread = atoi (argv[1]);
*nThreads = atoi (argv[2]);
*bSize = atoi (argv[3]);
}
/* ================================================================== */
/* ================================================================== */
/* Validates the output to make sure the kernel ran correctly */
int validateResults(int nThreads, int bSize, int wordsPerThread, int* out, int* in)
{
int i, j;
int tmp, cnt;
int* test;
test = (int*) malloc (nThreads * sizeof (int));
for(i = 0; i < nThreads; i++) {
test[i] = 0;
}
for(i = 0; i < nThreads; i++) {
tmp = in[i % bSize];
for(j = 0; j < wordsPerThread; j++) {
tmp = in[tmp];
}
test[i] = tmp;
}
cnt = 0;
for(i = 0; i < nThreads; i++) {
if(test[i] != out[i]) cnt++;
}
return cnt;
}
/* ================================================================== */
/* ================================================================== */
/* Used to clear the cache before the main test */
__global__ void clear_cache (int *in)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
in[tid] = in[tid] + 1;
}
/* ================================================================== */
/* ================================================================== */
/* Kernel execution function */
float runTest (int* h_out, int* d_out, int* d_pchase, int nThreads, int bSize, int wordsPerThread)
{
cudaEvent_t start, stop;
float total_time_taken;
int num_blocks = (nThreads + bSize - 1) / bSize;
dim3 grid (num_blocks);
dim3 threads (bSize);
fprintf (stderr, "number of iterations is %d\n", NUM_ITER);
/* Start timer */
cutilSafeCall (cudaEventCreate (&start));
cutilSafeCall (cudaEventCreate (&stop));
cutilSafeCall (cudaEventRecord (start, 0));
for(int iter = 0; iter < NUM_ITER; iter++) {
switch (wordsPerThread) {
case 1:
cache_kernel_1 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 2:
cache_kernel_2 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 4:
cache_kernel_4 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 8:
cache_kernel_8 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 16:
cache_kernel_16 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 32:
cache_kernel_32 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 64:
cache_kernel_64 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 128:
cache_kernel_128 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 256:
cache_kernel_256 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 512:
cache_kernel_512 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 1024:
cache_kernel_1024 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 2048:
cache_kernel_2048 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 4096:
cache_kernel_4096<<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
default:
fprintf(stderr, "Invalid wordsPerThread: %d\n", wordsPerThread);
total_time_taken = -1.0f;
break;
}
}
/* End timer */
cutilSafeCall (cudaEventRecord (stop, 0));
cutilSafeCall (cudaThreadSynchronize ());
cutilSafeCall (cudaEventElapsedTime (&total_time_taken, start, stop));
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
/* Copy results back to host */
cutilSafeCall (cudaMemcpy (h_out, d_out, nThreads * sizeof (int),
cudaMemcpyDeviceToHost));
return total_time_taken;
}
/* ================================================================== */
int main(int argc, char** argv)
{
int i;
long double total_dram;
/* Timer */
float total_time_taken;
/* Execution parameters */
int wordsPerThread;
int nThreads;
int bSize;
/* Data structures */
int* d_out;
int* h_out;
int* h_pchase;
int* d_pchase;
/* Read program parameters */
if(argc < 4) {
fprintf(stderr, "usage: %s <word/thread> <# threads> <threads/block>\n",
argv[0]);
exit (0);
} else {
readConfig (&wordsPerThread, &nThreads, &bSize, argv);
}
/* Find the best GPU in the system */
cudaSetDevice(cutGetMaxGflopsDeviceId ());
/* Clear the cache before starting the test */
/* For the shared memory test this isn't really necessary but we do it
just in case
*/
{
int cache_clear = 4 * 1024 * 1024; /* 4 MB */
int* h_temp = (int*) malloc (cache_clear);
for(int i = 0; i < cache_clear / sizeof (int); i++) h_temp[i] = 1;
int* d_temp;
cutilSafeCall (cudaMalloc ((void**) &d_temp, cache_clear));
cutilSafeCall (cudaMemcpy (d_temp, h_temp, cache_clear,
cudaMemcpyHostToDevice));
int nt = cache_clear / sizeof (int);
int nb = nt / 512;
clear_cache <<<nb, 512>>> (d_temp);
cutilSafeCall (cudaThreadSynchronize ());
free (h_temp);
cutilSafeCall (cudaFree (d_temp));
}
/* Allocate memory */
h_out = (int*) malloc (nThreads * sizeof (int));
h_pchase = (int*) malloc (bSize * sizeof (int));
cutilSafeCall (cudaMalloc ((void**) &d_out, nThreads * sizeof (int)));
cutilSafeCall (cudaMalloc ((void**) &d_pchase, bSize * sizeof (int)));
/* Initialize memory */
for(i = 0; i < bSize - 1; i++) {
h_pchase[i] = i;
}
h_pchase[bSize - 1] = bSize - 1;
/* Copy memory to device */
cutilSafeCall (cudaMemcpy (d_pchase, h_pchase, bSize * sizeof (int),
cudaMemcpyHostToDevice));
/* Execute the kernel */
total_time_taken = runTest (h_out, d_out, d_pchase, nThreads, bSize,
wordsPerThread);
/* Validate results */
/* This will take some time so you may want to turn it off when doing
multiple tests */
fprintf(stderr, "Results validated: %d errors\n", validateResults (nThreads,
bSize,
wordsPerThread,
h_out, h_pchase));
/* Print performance statistics */
total_time_taken = total_time_taken / NUM_ITER;
total_dram = ((wordsPerThread + 1) * (1.0 * nThreads/1e9)) * sizeof (int);
fprintf (stderr, "Time taken to load %Lg GBs: %f (ms)\n", total_dram,
total_time_taken);
fprintf (stderr, "Effective bandwidth: %Lg (GB/s)\n",
(total_dram/total_time_taken * 1e3));
/* Free memory */
free (h_out);
cutilSafeCall (cudaFree (d_out));
free (h_pchase);
cutilSafeCall (cudaFree (d_pchase));
return 0;
}
|
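/*
 * Editorial sketch (not part of the dataset row above): the bandwidth figure
 * printed by the pointer-chase benchmark follows the formula in its main():
 * each thread touches (wordsPerThread + 1) ints, the total is scaled to GB
 * with 1e9, and the time is the per-iteration average in milliseconds.
 * The helper name below is hypothetical.
 */
static double effective_bandwidth_gb_per_s(int wordsPerThread, int nThreads,
                                            double avg_time_ms)
{
    double total_gb = (wordsPerThread + 1) * ((double)nThreads / 1e9) * sizeof(int);
    return total_gb / avg_time_ms * 1e3; // same expression as the fprintf above
}
// Example: 64 words/thread, 1048576 threads, 2.5 ms -> about 109 GB/s.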
3166cfd2b044c02352e646e586a788aa3aa6584a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sigmoid_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int idx = 1;
float *dy = NULL;
hipMalloc(&dy, XSIZE*YSIZE);
int incy = 1;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sigmoid_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sigmoid_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sigmoid_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3166cfd2b044c02352e646e586a788aa3aa6584a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sigmoid_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int idx = 1;
float *dy = NULL;
cudaMalloc(&dy, XSIZE*YSIZE);
int incy = 1;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sigmoid_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sigmoid_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sigmoid_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
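/*
 * Editorial sketch (not part of the dataset row above): the while-loops in the
 * generated harness simply round XSIZE/YSIZE up to the next multiple of the
 * block size before dividing; the usual closed form is a ceiling division.
 */
static inline int ceil_div(int a, int b) { return (a + b - 1) / b; }
// dim3 gridBlock(ceil_div(XSIZE, BLOCKX), ceil_div(YSIZE, BLOCKY)); // equivalent grid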
3e73dc2816601a57d0674101ccbfefad215b7056.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Xiaomi Corporation (authors: Haowen Qiu, Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cstdio>
#include <functional>
#include <iostream>
#include <limits>
#include <numeric>
#include <random>
#include <utility>
#include <vector>
#include "k2/csrc/array.h"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/context.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "k2/csrc/rand.h"
#include "k2/csrc/test_utils.h"
#include "k2/csrc/timer.h"
namespace k2 {
template <typename T>
void MatrixTanspose(int32_t num_rows, int32_t num_cols, const T *src, T *dest) {
for (int32_t i = 0; i < num_rows; ++i) {
for (int32_t j = 0; j < num_cols; ++j) {
dest[j * num_rows + i] = src[i * num_cols + j];
}
}
}
template <typename T, DeviceType d>
void TestTranspose(int32_t num_rows, int32_t num_cols, int32_t num_reps = 1,
bool print_bandwidth = false) {
ContextPtr cpu = GetCpuContext(); // will use to copy data
ContextPtr context = nullptr;
if (d == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(d, kCuda);
context = GetCudaContext();
}
int32_t num_elements = num_rows * num_cols;
std::vector<T> host_src(num_elements);
std::iota(host_src.begin(), host_src.end(), 0);
std::vector<T> gold(num_elements);
MatrixTanspose<T>(num_rows, num_cols, host_src.data(), gold.data());
int32_t num_bytes = num_elements * sizeof(T);
auto src_region = NewRegion(context, num_bytes);
Array2<T> src(num_rows, num_cols, num_cols, 0, src_region);
cpu->CopyDataTo(num_bytes, host_src.data(), src.Context(), src.Data());
auto dest_region = NewRegion(context, num_bytes);
Array2<T> dest(num_cols, num_rows, num_rows, 0, dest_region);
// warm up, in case the first kernel launch takes longer than usual.
Transpose<T>(context, src, &dest);
Timer t(dest.Context());
for (int32_t i = 0; i < num_reps; ++i) {
Transpose<T>(context, src, &dest);
}
double elapsed = t.Elapsed();
std::vector<T> host_dest(num_elements);
dest.Context()->CopyDataTo(num_bytes, dest.Data(), cpu, host_dest.data());
ASSERT_EQ(host_dest, gold);
if (print_bandwidth) {
// effective_bandwidth (GB/s) = (read_bytes + write_bytes) / (time_seconds *
// 10^9), for matrix transpose, read_bytes + write_bytes = 2 * num_bytes
printf("Average time is: %.6f s, effective bandwidth is: %.2f GB/s\n",
elapsed / num_reps, 2 * num_bytes * 1e-9 * num_reps / elapsed);
}
}
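// Worked example for the effective-bandwidth comment in TestTranspose above
// (illustrative numbers only): a 1000 x 2000 int32 matrix occupies 8e6 bytes,
// so one transpose reads and writes 1.6e7 bytes in total; at 0.1 ms per
// repetition that is 1.6e7 / (1e-4 * 1e9) = 160 GB/s.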
TEST(OpsTest, TransposeTest) {
{
// test with some corner cases
std::vector<std::pair<int32_t, int32_t>> shapes = {
{0, 0}, {1, 1}, {5, 4}, {100, 0}, {128, 64}, {15, 13}, {115, 180},
};
for (const auto &v : shapes) {
TestTranspose<int32_t, kCpu>(v.first, v.second);
TestTranspose<int32_t, kCuda>(v.first, v.second);
}
}
{
// test with random shapes
for (int32_t i = 0; i != 5; ++i) {
auto rows = RandInt(0, 3000);
auto cols = RandInt(0, 3000);
TestTranspose<int32_t, kCpu>(rows, cols);
TestTranspose<int32_t, kCuda>(rows, cols);
}
}
#ifdef K2_WITH_CUDA
{
// speed test for different data type
// TODO(haowen): we may need to allocate different sizes of shared memory for
// different data types to get the best performance
TestTranspose<char, kCuda>(1000, 2000, 100, true);
TestTranspose<int16_t, kCuda>(1000, 2000, 100, true);
TestTranspose<int32_t, kCuda>(1000, 2000, 100, true);
TestTranspose<float, kCuda>(1000, 2000, 100, true);
TestTranspose<double, kCuda>(1000, 2000, 100, true);
}
#endif
}
template <typename S, typename T>
void ComputeExclusiveSum(const std::vector<S> &src, std::vector<T> *dest) {
auto &dst = *dest;
K2_CHECK_GE(dst.size(), src.size());
T sum = T(0);
size_t dst_size = dst.size();
size_t src_size = src.size();
for (size_t i = 0; i != dst_size; ++i) {
dst[i] = sum;
if (i >= src_size) break;
sum += src[i];
}
}
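// Worked example for ComputeExclusiveSum above (illustrative values only):
// src = {1, 2, 3, 4} gives dst = {0, 1, 3, 6}; when dst has one extra element
// (dst_size == src_size + 1) the running total 10 is also written, giving
// dst = {0, 1, 3, 6, 10}.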
template <typename S, typename T>
void CheckExclusiveSumArray1Result(const std::vector<S> &src_data,
const Array1<T> &dest) {
// copy data from CPU to CPU/GPU
std::vector<T> dest_data(dest.Dim());
dest.Context()->CopyDataTo(dest.Dim() * dest.ElementSize(), dest.Data(),
GetCpuContext(), dest_data.data());
std::vector<T> expected_data(dest.Dim());
ComputeExclusiveSum(src_data, &expected_data);
ASSERT_EQ(dest_data.size(), expected_data.size());
for (size_t i = 0; i != dest_data.size(); ++i) {
EXPECT_EQ(dest_data[i], expected_data[i]);
}
}
template <typename S, typename T>
void TestExclusiveSumArray1(int32_t num_elem) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// Test ExclusiveSum(Array1<T> &src)
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
Array1<S> dest = ExclusiveSum(src);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSum(Array1<S> &src, Array1<T> *dest) with
// dest.Dim() == src.Dim()
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
Array1<T> dest(context, num_elem);
ExclusiveSum(src, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSum(Array1<T> &src, Array1<T> *dest) where
// &dest = &src
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
ExclusiveSum(src, &src);
CheckExclusiveSumArray1Result(data, src);
}
{
// Test ExclusiveSum(Array1<T> &src, Array1<T> *dest) with
// dest.Dim() == src.Dim() + 1
int32_t src_dim = num_elem - 1;
std::vector<S> data(src_dim);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
// note we allocate one extra element in region for `src`,
// but its value will not be set.
int32_t num_bytes = num_elem * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
cpu->CopyDataTo(src_dim * sizeof(T), data.data(), region->context,
region->data);
Array1<S> src(src_dim, region, 0);
Array1<T> dest(context, num_elem);
ASSERT_EQ(dest.Dim(), src.Dim() + 1);
ExclusiveSum(src, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSumDeref(Array1<S*> &src, Array1<S> *dest) with
// dest.Dim() == src.Dim()
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
S *src_data = src.Data();
Array1<const S *> src_ptr(context, num_elem);
const S **src_ptr_data = src_ptr.Data();
K2_EVAL(
context, num_elem, lambda_set_values,
(int32_t i)->void { src_ptr_data[i] = src_data + i; });
Array1<S> dest(context, num_elem);
ExclusiveSumDeref(src_ptr, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSumDeref(Array1<S*> &src, Array1<S> *dest) with
// dest.Dim() == src.Dim() + 1
int32_t src_dim = num_elem - 1;
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
// note we allocate one extra element in region for `src`,
// but its value will not be set.
Array1<S> src(context, data);
S *src_data = src.Data();
int32_t num_bytes = num_elem * sizeof(S *);
RegionPtr region = NewRegion(context, num_bytes);
S **region_data = region->GetData<S *>();
K2_EVAL(
context, num_elem, lambda_set_values,
(int32_t i)->void { region_data[i] = &src_data[i]; });
// note src_ptr.Dim() == src_dim == num_elem - 1
Array1<const S *> src_ptr(src_dim, region, 0);
Array1<S> dest(context, num_elem);
ASSERT_EQ(dest.Dim(), src_ptr.Dim() + 1);
ExclusiveSumDeref(src_ptr, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
}
}
#ifndef _MSC_VER
// It results in the following error SOMETIMES on windows
//
// unknown file: error: SEH exception with code 0xc0000005 thrown in the test
// body.
//
// To make the CI happy, we just disable it for Windows.
TEST(OpsTest, ExclusiveSumArray1Test) {
TestExclusiveSumArray1<int32_t, int32_t>(1000);
TestExclusiveSumArray1<float, double>(1000);
}
#endif
template <typename T>
void ComputeExclusiveSumArray2(const std::vector<T> &src, int32_t dest_rows,
int32_t dest_cols, std::vector<T> *dest,
int32_t axis) {
auto &dst = *dest;
int32_t src_num_elems = static_cast<int32_t>(src.size());
if (axis == 0) {
if (dst.size() > src.size()) {
// dst.rows == src.rows + 1
K2_CHECK_EQ((int32_t)src.size(), dest_cols * (dest_rows - 1));
}
for (int32_t j = 0; j != dest_cols; ++j) {
T sum = T(0);
for (auto i = 0; i != dest_rows; ++i) {
int32_t dest_pos = i * dest_cols + j;
dst[dest_pos] = sum;
int32_t src_pos = i * dest_cols + j; // src_cols == dest_cols
if (src_pos >= src_num_elems) break;
sum += src[src_pos];
}
}
} else {
K2_CHECK_EQ(axis, 1);
int32_t src_cols = dest_cols;
if (dst.size() > src.size()) {
// dst.cols == src.cols + 1
K2_CHECK_EQ((int32_t)src.size(), dest_rows * (dest_cols - 1));
src_cols = dest_cols - 1;
}
for (int32_t i = 0; i != dest_rows; ++i) {
T sum = T(0);
for (auto j = 0; j != dest_cols; ++j) {
int32_t dest_pos = i * dest_cols + j;
dst[dest_pos] = sum;
int32_t src_pos = i * src_cols + j;
if (src_pos >= src_num_elems) break;
sum += src[src_pos];
}
}
}
}
template <typename T>
void CheckExclusiveSumArray2Result(const std::vector<T> &src_data,
Array2<T> &dest, int32_t axis) {
int32_t dest_rows = dest.Dim0();
int32_t dest_cols = dest.Dim1();
// just keep the test code simple by calling `Flatten` even though it's not so
// efficient.
Array1<T> dest_array1 = dest.Flatten();
// copy data from CPU to CPU/GPU
std::vector<T> dest_data(dest_rows * dest_cols);
dest_array1.Context()->CopyDataTo(
dest_array1.Dim() * dest_array1.ElementSize(), dest_array1.Data(),
GetCpuContext(), dest_data.data());
std::vector<T> expected_data(dest_rows * dest_cols);
ComputeExclusiveSumArray2(src_data, dest_rows, dest_cols, &expected_data,
axis);
EXPECT_EQ(dest_data, expected_data);
}
template <typename T>
void TestExclusiveSumArray2(int32_t rows, int32_t cols) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// axis == 0 && &dest == &src, ElementStride0 == cols
int32_t axis = 0;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 0 && &dest == &src, ElementStride0 > cols
int32_t axis = 0;
int32_t stride0 = RandInt(cols + 1, cols + 10);
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
// allocate extra memory since stride0 > cols here
int32_t num_bytes = rows * stride0 * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * stride0 + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, stride0, 0, region);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 0 && dest.Dim0() == src.Dim0(), ElementStride0 == cols
int32_t axis = 0;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
Array2<T> dest(context, rows, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// axis == 0 && dest.Dim0() == src.Dim0(), ElementStride0 > cols
int32_t axis = 0;
int32_t stride0 = RandInt(cols + 1, cols + 10);
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
// allocate extra memory since stride0 > cols here
int32_t num_bytes = rows * stride0 * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * stride0 + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, stride0, 0, region);
Array2<T> dest(context, rows, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// axis == 0 && dest.Dim0() == src.Dim0() + 1, we need to allocate one
// extra element for src
int32_t axis = 0;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
int32_t num_bytes = (rows * cols + 1) * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * cols + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, cols, 0, region);
{
// dest.stride0 == dest.cols
Array2<T> dest(context, rows + 1, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// dest.stride0 > dest.cols
int32_t dest_stride0 = cols + 5;
int32_t dest_rows = rows + 1;
RegionPtr dest_region =
NewRegion(context, dest_rows * dest_stride0 * sizeof(T));
Array2<T> dest(dest_rows, cols, dest_stride0, 0, dest_region);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
}
{
// axis == 1 && &dest == &src, ElementStride0 == cols
int32_t axis = 1;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 1 && &dest == &src, ElementStride0 > cols
int32_t axis = 1;
int32_t stride0 = RandInt(cols + 1, cols + 10);
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
// allocate extra memory since stride0 > cols here
int32_t num_bytes = rows * stride0 * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * stride0 + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, stride0, 0, region);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 1 && dest.Dim1() == src.Dim1(), ElementStride0 == cols
int32_t axis = 1;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
Array2<T> dest(context, rows, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// axis == 1 && dest.Dim1() == src.Dim1() + 1, we need to allocate one
// extra element for src
int32_t axis = 1;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
int32_t num_bytes = (rows * cols + 1) * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * cols + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, cols, 0, region);
{
// dest.stride0 == dest.cols
Array2<T> dest(context, rows, cols + 1);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// dest.stride0 > dest.cols
int32_t dest_stride0 = cols + 5;
int32_t dest_cols = cols + 1;
RegionPtr dest_region =
NewRegion(context, rows * dest_stride0 * sizeof(T));
Array2<T> dest(rows, dest_cols, dest_stride0, 0, dest_region);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
}
}
}
TEST(OpsTest, ExclusiveSumArray2Test) {
int32_t rows = RandInt(500, 1000);
int32_t cols = RandInt(500, 1000);
TestExclusiveSumArray2<int32_t>(rows, cols);
}
template <typename T>
void TestArrayMaxAndOr() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// Max
const std::vector<T> values = {1, 3, 2, 8, 0, -1};
Array1<T> src(context, values);
Array1<T> dst(context, 1);
T default_value = 0;
Max(src, default_value, &dst);
EXPECT_EQ(dst[0], 8);
}
{
// Max, where dst is one of the elements of src
const std::vector<T> values = {1, 3, 2, 8, 0, -1};
Array1<T> src(context, values);
Array1<T> dst = src.Range(2, 1);
T default_value = 0;
Max(src, default_value, &dst);
EXPECT_EQ(dst[0], 8);
// src has been changed as well
EXPECT_EQ(src[2], 8);
// other values are not changed
src = src.To(cpu);
std::vector<T> cpu_data(src.Data(), src.Data() + src.Dim());
const std::vector<T> expected_data = {1, 3, 8, 8, 0, -1};
EXPECT_EQ(cpu_data, expected_data);
}
{
// Max, with random large size
int32_t num_elems = RandInt(1000, 10000);
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), num_elems);
// set a random position to `max_value`
int32_t pos = RandInt(0, num_elems - 1);
T max_value = static_cast<T>(num_elems * 2);
data[pos] = max_value;
Array1<T> src(context, data);
Array1<T> dst(context, 1);
T default_value = 0;
Max(src, default_value, &dst);
EXPECT_EQ(dst[0], max_value);
}
{
// And
const std::vector<T> values = {3, 6, 11};
Array1<T> src(context, values);
Array1<T> dst(context, 1);
T default_value = -1;
And(src, default_value, &dst);
EXPECT_EQ(dst[0], 2);
}
{
// Or
const std::vector<T> values = {3, 6, 4};
Array1<T> src(context, values);
Array1<T> dst(context, 1);
T default_value = 0;
Or(src, default_value, &dst);
EXPECT_EQ(dst[0], 7);
}
}
}
TEST(OpsTest, ArrayMaxAndOrTest) { TestArrayMaxAndOr<int32_t>(); }
template <typename T>
void TestCat() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// a case with small size
std::vector<T> data1 = {3, 1, 2};
std::vector<T> data2 = {5, 6, 7, 8};
std::vector<T> data3 = {}; // empty
std::vector<T> data4 = {9};
std::vector<T> expected_data = {3, 1, 2, 5, 6, 7, 8, 9};
Array1<T> array1(context, data1);
Array1<T> array2(context, data2);
Array1<T> array3(context, data3);
Array1<T> array4(context, data4);
{
// test Cat(int32_t, Array1<T>**)
std::vector<const Array1<T> *> arrays = {&array1, &array2, &array3,
&array4};
const Array1<T> **src = arrays.data();
Array1<T> dst = Cat(context, 4, src);
EXPECT_EQ(dst.Dim(), 8);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
EXPECT_EQ(cpu_data, expected_data);
}
{
// test Cat(int32_t, Array1<T>*)
std::vector<Array1<T>> arrays = {array1, array2, array3, array4};
const Array1<T> *src = arrays.data();
Array1<T> dst = Cat(context, 4, src);
EXPECT_EQ(dst.Dim(), 8);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
EXPECT_EQ(cpu_data, expected_data);
}
}
{
// test with random large size, the arrays' sizes are fairly balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<T>> arrays_vec(num_array);
std::vector<const Array1<T> *> arrays(num_array);
int32_t total_size = 0;
for (int32_t j = 0; j != num_array; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
std::vector<T> data(curr_array_size);
std::iota(data.begin(), data.end(), total_size);
total_size += curr_array_size;
arrays_vec[j] = Array1<T>(context, data);
arrays[j] = &arrays_vec[j];
}
const Array1<T> **src = arrays.data();
Array1<T> dst = Cat(context, num_array, src);
EXPECT_EQ(dst.Dim(), total_size);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
std::vector<T> expected_data(dst.Dim());
std::iota(expected_data.begin(), expected_data.end(), 0);
EXPECT_EQ(cpu_data, expected_data);
}
}
{
// test with random large size: the arrays' sizes are not balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<T>> arrays_vec(num_array);
std::vector<const Array1<T> *> arrays(num_array);
int32_t total_size = 0, max_size = 0;
// note the loop bound is `j != num_array - 1`; we push a very long array
// after the loop
for (int32_t j = 0; j != num_array - 1; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
std::vector<T> data(curr_array_size);
std::iota(data.begin(), data.end(), total_size);
total_size += curr_array_size;
arrays_vec[j] = Array1<T>(context, data);
arrays[j] = &arrays_vec[j];
if (curr_array_size > max_size) max_size = curr_array_size;
}
// generate an array with very large size
{
int32_t average_size = total_size / num_array;
int32_t long_size = average_size * 10;
std::vector<T> data(long_size);
std::iota(data.begin(), data.end(), total_size);
total_size += long_size;
arrays_vec[num_array - 1] = Array1<T>(context, data);
arrays[num_array - 1] = &arrays_vec[num_array - 1];
}
const Array1<T> **src = arrays.data();
Array1<T> dst = Cat(context, num_array, src);
EXPECT_EQ(dst.Dim(), total_size);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
std::vector<T> expected_data(dst.Dim());
std::iota(expected_data.begin(), expected_data.end(), 0);
EXPECT_EQ(cpu_data, expected_data);
}
}
}
}
TEST(OpsTest, CatTest) {
TestCat<int32_t>();
TestCat<float>();
}
TEST(OpsTest, CatWithOffsets) {
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// a case with small size
std::vector<int32_t> data1 = {3, 1, 2};
std::vector<int32_t> data2 = {5, 6, 7, 8};
std::vector<int32_t> data3 = {}; // empty
std::vector<int32_t> data4 = {9};
std::vector<int32_t> data_offsets = {1, 2, 3, 4};
std::vector<int32_t> expected_data = {4, 2, 3, 7, 8, 9, 10, 13};
Array1<int32_t> array1(context, data1);
Array1<int32_t> array2(context, data2);
Array1<int32_t> array3(context, data3);
Array1<int32_t> array4(context, data4);
Array1<int32_t> offsets(context, data_offsets);
std::vector<const Array1<int32_t> *> arrays = {&array1, &array2, &array3,
&array4};
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = CatWithOffsets(offsets, src);
CheckArrayData(dst, expected_data);
}
{
// test with random large size
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
Array1<int32_t> offsets = RandUniformArray1(GetCpuContext(), num_array,
-num_array, num_array);
const int32_t *offsets_data = offsets.Data();
std::vector<int32_t> expected_data;
std::vector<Array1<int32_t>> arrays_vec(num_array);
std::vector<const Array1<int32_t> *> arrays(num_array);
int32_t total_size = 0;
for (int32_t j = 0; j != num_array; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
std::vector<int32_t> data(curr_array_size);
std::iota(data.begin(), data.end(), total_size);
arrays_vec[j] = Array1<int32_t>(context, data);
arrays[j] = &arrays_vec[j];
std::vector<int32_t> curr_expected_data(curr_array_size);
std::iota(curr_expected_data.begin(), curr_expected_data.end(),
total_size + offsets_data[j]);
expected_data.insert(expected_data.end(), curr_expected_data.begin(),
curr_expected_data.end());
total_size += curr_array_size;
}
const Array1<int32_t> **src = arrays.data();
offsets = offsets.To(context);
Array1<int32_t> dst = CatWithOffsets(offsets, src);
CheckArrayData(dst, expected_data);
}
}
}
}
TEST(OpsTest, SpliceRowSplitsTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// a case with small size
std::vector<int32_t> data1 = {0, 2, 5};
std::vector<int32_t> data2 = {0, 2, 2, 3};
std::vector<int32_t> data3 = {0};
std::vector<int32_t> data4 = {0, 3};
std::vector<int32_t> expected_data = {0, 2, 5, 7, 7, 8, 11};
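// (Editorial note, not part of the original test: splicing row_splits keeps
// the first array unchanged, then appends each later array with its leading 0
// dropped and the running total added, e.g. {0, 2, 2, 3} contributes
// {7, 7, 8} once the offset 5 from {0, 2, 5} is applied.)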
Array1<int32_t> array1(context, data1);
Array1<int32_t> array2(context, data2);
Array1<int32_t> array3(context, data3);
Array1<int32_t> array4(context, data4);
std::vector<const Array1<int32_t> *> arrays = {&array1, &array2, &array3,
&array4};
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = SpliceRowSplits(4, src);
EXPECT_EQ(dst.Dim(), expected_data.size());
// copy memory from GPU/CPU to CPU
dst = dst.To(cpu);
std::vector<int32_t> cpu_data(dst.Data(), dst.Data() + dst.Dim());
EXPECT_EQ(cpu_data, expected_data);
}
{
// test with random large size, the arrays' sizes are fairly balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<int32_t>> arrays_vec(num_array);
std::vector<const Array1<int32_t> *> arrays(num_array);
std::vector<int32_t> expected_data;
int32_t data_offset = 0;
for (int32_t j = 0; j != num_array; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
RaggedShape shape =
RandomRaggedShape(false, 2, 2, curr_array_size, curr_array_size);
ASSERT_EQ(shape.NumAxes(), 2);
Array1<int32_t> cpu_row_splits = shape.RowSplits(1).To(cpu);
int32_t num_splits = cpu_row_splits.Dim();
ASSERT_GE(num_splits, 1);
const int32_t *splits_data = cpu_row_splits.Data();
for (int32_t n = 0; n < num_splits; ++n) {
expected_data.push_back(splits_data[n] + data_offset);
}
if (j + 1 < num_array) expected_data.pop_back();
data_offset += splits_data[num_splits - 1];
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
ASSERT_GE(row_splits.Dim(), 1);
arrays_vec[j] = row_splits;
arrays[j] = &arrays_vec[j];
}
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = SpliceRowSplits(num_array, src);
EXPECT_EQ(dst.Dim(), expected_data.size());
// copy memory from GPU/CPU to CPU
dst = dst.To(cpu);
std::vector<int32_t> cpu_data(dst.Data(), dst.Data() + dst.Dim());
EXPECT_EQ(cpu_data, expected_data);
}
}
{
// test with random large size: the arrays' sizes are not balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<int32_t>> arrays_vec(num_array);
std::vector<const Array1<int32_t> *> arrays(num_array);
std::vector<int32_t> expected_data;
int32_t data_offset = 0;
int32_t max_size = 0;
for (int32_t j = 0; j != num_array - 1; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
RaggedShape shape =
RandomRaggedShape(false, 2, 2, curr_array_size, curr_array_size);
ASSERT_EQ(shape.NumAxes(), 2);
Array1<int32_t> cpu_row_splits = shape.RowSplits(1).To(cpu);
int32_t num_splits = cpu_row_splits.Dim();
ASSERT_GE(num_splits, 1);
const int32_t *splits_data = cpu_row_splits.Data();
for (int32_t n = 0; n < num_splits; ++n) {
expected_data.push_back(splits_data[n] + data_offset);
}
expected_data.pop_back();
data_offset += splits_data[num_splits - 1];
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
ASSERT_GE(row_splits.Dim(), 1);
arrays_vec[j] = row_splits;
arrays[j] = &arrays_vec[j];
if (num_splits > max_size) max_size = num_splits;
}
// generate an array with very large size
{
int32_t total_size = static_cast<int32_t>(expected_data.size());
int32_t average_size = total_size / num_array;
int32_t long_size = average_size * 10;
RaggedShape shape =
RandomRaggedShape(false, 2, 2, long_size, long_size);
ASSERT_EQ(shape.NumAxes(), 2);
Array1<int32_t> cpu_row_splits = shape.RowSplits(1).To(cpu);
int32_t num_splits = cpu_row_splits.Dim();
ASSERT_GE(num_splits, 1);
const int32_t *splits_data = cpu_row_splits.Data();
for (int32_t n = 0; n < num_splits; ++n) {
expected_data.push_back(splits_data[n] + data_offset);
}
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
ASSERT_GE(row_splits.Dim(), 1);
arrays_vec[num_array - 1] = row_splits;
arrays[num_array - 1] = &arrays_vec[num_array - 1];
}
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = SpliceRowSplits(num_array, src);
EXPECT_EQ(dst.Dim(), expected_data.size());
// copy memory from GPU/CPU to CPU
dst = dst.To(cpu);
std::vector<int32_t> cpu_data(dst.Data(), dst.Data() + dst.Dim());
EXPECT_EQ(cpu_data, expected_data);
}
}
}
}
template <typename T>
void TestRangeAndRandomArray1() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// test Range with small size
Array1<T> result = Range<T>(context, 6, 3, 2);
const std::vector<T> values = {3, 5, 7, 9, 11, 13};
result = result.To(cpu);
std::vector<T> cpu_data(result.Data(), result.Data() + result.Dim());
EXPECT_EQ(cpu_data, values);
}
{
// test Range with random large size
int32_t num_elems = RandInt(1000, 10000);
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> result = Range<T>(context, num_elems, 0);
result = result.To(cpu);
std::vector<T> cpu_data(result.Data(), result.Data() + result.Dim());
EXPECT_EQ(cpu_data, data);
}
{
// test RandUniformArray1
Array1<T> result = RandUniformArray1<T>(context, 1000, 0, 10000);
result = result.To(cpu);
}
}
}
TEST(OpsTest, RangeTest) {
TestRangeAndRandomArray1<int32_t>();
TestRangeAndRandomArray1<float>();
TestRangeAndRandomArray1<double>();
}
TEST(OpsTest, ValidateRowSplitsAndIdsTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// test RowSplitsToRowIds and RowIdsToRowSplits
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
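// (Editorial note, not part of the original test: the two vectors above encode
// the same ragged structure; row_ids[i] is the row owning element i, and
// row_splits[r] .. row_splits[r+1] is the half-open range of elements in row
// r, e.g. row 3 owns elements 5, 6 and 7 because row_splits[3] = 5 and
// row_splits[4] = 8.)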
{
Array1<int32_t> row_splits(context, row_splits_vec);
Array1<int32_t> row_ids(context, row_ids_vec.size());
RowSplitsToRowIds(row_splits, &row_ids);
row_ids = row_ids.To(cpu);
std::vector<int32_t> cpu_data(row_ids.Data(),
row_ids.Data() + row_ids.Dim());
EXPECT_EQ(cpu_data, row_ids_vec);
}
{
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec.size());
RowIdsToRowSplits(row_ids, &row_splits);
row_splits = row_splits.To(cpu);
std::vector<int32_t> cpu_data(row_splits.Data(),
row_splits.Data() + row_splits.Dim());
EXPECT_EQ(cpu_data, row_splits_vec);
}
}
{
// empty case for row splits and row ids
const std::vector<int32_t> row_splits_vec;
const std::vector<int32_t> row_ids_vec;
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// valid case for row splits and row ids
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_TRUE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// valid case for row splits and row ids with random size
for (int32_t i = 0; i != 5; ++i) {
RaggedShape shape = RandomRaggedShape(true, 2, 2, 2000, 10000);
ASSERT_EQ(shape.NumAxes(), 2);
// note shape is on CPU
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
Array1<int32_t> row_ids = shape.RowIds(1).To(context);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_TRUE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
}
{
// provided tmp storage
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
{
Array1<int32_t> tmp(context, 3, 2);
EXPECT_TRUE(ValidateRowSplits(row_splits, &tmp));
// check elements
tmp = tmp.To(cpu);
std::vector<int32_t> cpu_data(tmp.Data(), tmp.Data() + tmp.Dim());
EXPECT_THAT(cpu_data, ::testing::ElementsAre(0, 2, 2));
}
{
Array1<int32_t> tmp(context, 3, 2);
EXPECT_TRUE(ValidateRowIds(row_ids, &tmp));
// check elements
tmp = tmp.To(cpu);
std::vector<int32_t> cpu_data(tmp.Data(), tmp.Data() + tmp.Dim());
EXPECT_THAT(cpu_data, ::testing::ElementsAre(0, 2, 2));
}
{
Array1<int32_t> tmp(context, 3, 2);
EXPECT_TRUE(ValidateRowSplitsAndIds(row_splits, row_ids, &tmp));
// check elements
tmp = tmp.To(cpu);
std::vector<int32_t> cpu_data(tmp.Data(), tmp.Data() + tmp.Dim());
EXPECT_THAT(cpu_data, ::testing::ElementsAre(0, 2, 2));
}
}
{
// bad case for row splits, does not start with 0
const std::vector<int32_t> row_splits_vec = {1, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case for row splits, contains negative value
const std::vector<int32_t> row_splits_vec = {0, 2, 3, -5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case for row splits, not non-decreasing
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 1, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case row ids, contains negative value
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, -2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_FALSE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case row ids, not non-decreasing
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 6, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_FALSE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case row ids and row splits don't agree with each other
// i < row_splits[row_ids[i]]
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 8, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
      // another bad case where row ids and row splits don't agree with each
      // other: i >= row_splits[row_ids[i] + 1]
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 5, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case for row ids, num_elems != row_splits[-1]
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
}
}
TEST(OpsTest, GetCountsTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
int32_t n = 0;
std::vector<int32_t> values;
Array1<int32_t> src(context, values);
Array1<int32_t> ans = GetCounts(src, n);
EXPECT_EQ(ans.Dim(), 0);
}
{
// simple case
int32_t n = 8;
std::vector<int32_t> values = {0, 1, 2, 1, 5, 5, 7, 6, 3, 2};
std::vector<int32_t> expected_data = {1, 2, 2, 1, 0, 2, 1, 1};
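      // ans[v] should be the number of occurrences of v in `values`,
      // e.g. the value 5 appears twice, so expected_data[5] == 2.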
Array1<int32_t> src(context, values);
Array1<int32_t> ans = GetCounts(src, n);
ans = ans.To(cpu);
std::vector<int32_t> data(ans.Data(), ans.Data() + ans.Dim());
EXPECT_EQ(data, expected_data);
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 10000);
int32_t src_dim = RandInt(0, 10000);
Array1<int32_t> src = RandUniformArray1(context, src_dim, 0, n - 1);
Array1<int32_t> ans = GetCounts(src, n);
ans = ans.To(cpu);
std::vector<int32_t> data(ans.Data(), ans.Data() + ans.Dim());
src = src.To(cpu);
int32_t *src_data = src.Data();
std::vector<int32_t> expected_data(n, 0);
for (int32_t j = 0; j < src.Dim(); ++j) {
++expected_data[src_data[j]];
}
EXPECT_EQ(data, expected_data);
}
}
}
}
template <typename S, typename T>
void TestMonotonicLowerBound() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
std::vector<S> values;
Array1<S> src(context, values);
Array1<T> dest(context, 0);
MonotonicLowerBound(src, &dest);
EXPECT_EQ(dest.Dim(), 0);
}
{
// simple case
std::vector<S> values = {2, 1, 3, 7, 5, 8, 20, 15};
std::vector<T> expected_data = {1, 1, 3, 5, 5, 8, 15, 15};
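      // dest[i] is expected to be the minimum of src[i..n-1] (a suffix
      // minimum), e.g. expected_data[3] = min(7, 5, 8, 20, 15) = 5.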
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
Array1<T> dest(context, static_cast<int32_t>(values.size()));
MonotonicLowerBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
EXPECT_EQ(data, expected_data);
}
{
// simple case with dest = &src
std::vector<S> values = {2, 1, 3, 7, 5, 8, 20, 15};
std::vector<T> expected_data = {1, 1, 3, 5, 5, 8, 15, 15};
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
MonotonicLowerBound(src, &src);
src = src.To(cpu);
std::vector<T> data(src.Data(), src.Data() + src.Dim());
EXPECT_EQ(data, expected_data);
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 10000);
int32_t src_dim = RandInt(0, 10000);
Array1<S> src = RandUniformArray1(context, src_dim, 0, n - 1);
Array1<T> dest(context, src_dim);
MonotonicLowerBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
src = src.To(cpu);
        S *src_data = src.Data();
S min_value = std::numeric_limits<S>::max();
std::vector<T> expected_data(src_dim);
for (int32_t i = src_dim - 1; i >= 0; --i) {
          min_value = std::min(src_data[i], min_value);
expected_data[i] = min_value;
}
EXPECT_EQ(data, expected_data);
}
}
}
}
TEST(OpsTest, MonotonicLowerBoundTest) {
TestMonotonicLowerBound<int32_t, int32_t>();
TestMonotonicLowerBound<int32_t, double>();
}
template <typename S, typename T>
void TestMonotonicDecreasingUpperBound() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
std::vector<S> values;
Array1<S> src(context, values);
Array1<T> dest(context, 0);
MonotonicDecreasingUpperBound(src, &dest);
EXPECT_EQ(dest.Dim(), 0);
}
{
// simple case
std::vector<S> values = {10, 7, 3, 5, 4, 1, 0, 2};
std::vector<T> expected_data = {10, 7, 5, 5, 4, 2, 2, 2};
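      // dest[i] is expected to be the maximum of src[i..n-1] (a suffix
      // maximum), e.g. expected_data[2] = max(3, 5, 4, 1, 0, 2) = 5.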
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
Array1<T> dest(context, static_cast<int32_t>(values.size()));
MonotonicDecreasingUpperBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
EXPECT_EQ(data, expected_data);
}
{
// simple case with dest = &src
std::vector<S> values = {10, 7, 3, 5, 4, 1, 0, 2};
std::vector<T> expected_data = {10, 7, 5, 5, 4, 2, 2, 2};
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
MonotonicDecreasingUpperBound(src, &src);
src = src.To(cpu);
std::vector<T> data(src.Data(), src.Data() + src.Dim());
EXPECT_EQ(data, expected_data);
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 10000);
int32_t src_dim = RandInt(0, 10000);
Array1<S> src = RandUniformArray1(context, src_dim, 0, n - 1);
Array1<T> dest(context, src_dim);
MonotonicDecreasingUpperBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
src = src.To(cpu);
        S *src_data = src.Data();
S max_value = std::numeric_limits<S>::min();
std::vector<T> expected_data(src_dim);
for (int32_t i = src_dim - 1; i >= 0; --i) {
          max_value = std::max(src_data[i], max_value);
expected_data[i] = max_value;
}
EXPECT_EQ(data, expected_data);
}
}
}
}
TEST(OpsTest, MonotonicDecreasingUpperBoundTest) {
TestMonotonicDecreasingUpperBound<int32_t, int32_t>();
TestMonotonicDecreasingUpperBound<int32_t, double>();
}
TEST(OpsTest, InvertMonotonicDecreasingTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
std::vector<int32_t> values;
Array1<int32_t> src(context, values);
Array1<int32_t> dest = InvertMonotonicDecreasing(src);
EXPECT_EQ(dest.Dim(), 0);
}
{
// simple case
std::vector<int32_t> values = {6, 4, 4, 2};
Array1<int32_t> src(context, values);
Array1<int32_t> dest = InvertMonotonicDecreasing(src);
EXPECT_EQ(dest.Dim(), 6);
dest = dest.To(cpu);
std::vector<int32_t> data(dest.Data(), dest.Data() + dest.Dim());
std::vector<int32_t> expected_data = {4, 4, 3, 3, 1, 1};
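      // dest has src[0] == 6 elements, and dest[i] counts how many entries of
      // src are greater than i, e.g. dest[2] == 3 since {6, 4, 4} > 2.
      // Applying the op twice recovers src, as checked below.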
EXPECT_EQ(data, expected_data);
// convert back
dest = dest.To(context);
Array1<int32_t> src1 = InvertMonotonicDecreasing(dest);
EXPECT_TRUE(Equal(src1, src));
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 1000);
int32_t src_dim = RandInt(0, 1000);
Array1<int32_t> src = RandUniformArray1(context, src_dim, 1, n);
Sort<int32_t, GreaterThan<int32_t>>(&src);
Array1<int32_t> dest = InvertMonotonicDecreasing(src);
// convert back
Array1<int32_t> src1 = InvertMonotonicDecreasing(dest);
EXPECT_TRUE(Equal(src1, src));
}
}
}
}
template <typename T>
void ArrayPlusTest() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
for (int32_t i = 0; i != 2; ++i) {
{
// normal case
int32_t dim = RandInt(0, 1000);
Array1<T> src1 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src2 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> dest(context, dim);
Plus(src1, src2, &dest);
Array1<T> ans = Plus(src1, src2);
EXPECT_EQ(ans.Dim(), dim);
        // move to CPU so the expected result can be computed on the host
        src1 = src1.To(cpu);
        src2 = src2.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src1[n] + src2[n];
}
CheckArrayData(dest, expected);
CheckArrayData(ans, expected);
}
{
// special case: &src1 == &src2 == dest
int32_t dim = RandInt(0, 1000);
Array1<T> src = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src_copy = src.Clone();
Plus(src, src, &src);
        src_copy = src_copy.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src_copy[n] + src_copy[n];
}
CheckArrayData(src, expected);
}
}
}
}
TEST(OpsTest, PlusTest) {
ArrayPlusTest<int32_t>();
ArrayPlusTest<float>();
}
template <typename T>
void ArrayMinusTest() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
for (int32_t i = 0; i != 2; ++i) {
{
// normal case
int32_t dim = RandInt(0, 1000);
Array1<T> src1 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src2 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> dest(context, dim);
Minus(src1, src2, &dest);
Array1<T> ans = Minus(src1, src2);
EXPECT_EQ(ans.Dim(), dim);
        src1 = src1.To(cpu);
        src2 = src2.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src1[n] - src2[n];
}
CheckArrayData(dest, expected);
CheckArrayData(ans, expected);
}
{
// special case: &src1 == &src2 == dest
int32_t dim = RandInt(0, 1000);
Array1<T> src = RandUniformArray1<T>(context, dim, 0, 1000);
Minus(src, src, &src);
Array1<T> expected(context, dim, T(0));
CheckArrayData(src, expected);
}
}
}
}
TEST(OpsTest, MinusTest) {
ArrayMinusTest<int32_t>();
ArrayMinusTest<float>();
}
template <typename T>
void ArrayTimesTest() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
for (int32_t i = 0; i != 2; ++i) {
{
// normal case
int32_t dim = RandInt(0, 1000);
Array1<T> src1 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src2 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> dest(context, dim);
Times(src1, src2, &dest);
Array1<T> ans = Times(src1, src2);
EXPECT_EQ(ans.Dim(), dim);
        src1 = src1.To(cpu);
        src2 = src2.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src1[n] * src2[n];
}
CheckArrayData(dest, expected);
CheckArrayData(ans, expected);
}
{
// special case: &src1 == &src2 == dest
int32_t dim = RandInt(0, 1000);
Array1<T> src = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src_copy = src.Clone();
Times(src, src, &src);
        src_copy = src_copy.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src_copy[n] * src_copy[n];
}
CheckArrayData(src, expected);
}
}
}
}
TEST(OpsTest, TimesTest) {
ArrayTimesTest<int32_t>();
ArrayTimesTest<float>();
}
TEST(OpsTest, Array1IndexTest) {
for (int loop = 0; loop < 2; loop++) {
ContextPtr c = (loop == 0 ? GetCpuContext() : GetCudaContext()),
cpu_context = GetCpuContext();
int32_t src_dim = RandInt(1, 10), ans_dim = RandInt(1, 10);
using T = int64_t;
Array1<T> src = RandUniformArray1<T>(c, src_dim, 0, 100);
Array1<int32_t> indexes_no_minus_one =
RandUniformArray1<int32_t>(c, ans_dim, 0, src_dim - 1),
indexes_minus_one =
RandUniformArray1<int32_t>(c, ans_dim, -1, src_dim - 1);
T default_value = loop - 1;
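    // When the bool argument is true, an index of -1 is allowed and maps to
    // default_value; this is verified element by element below.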
Array1<T> ans_no_minus_one =
Index(src, indexes_no_minus_one, false, default_value),
ans_no_minus_one_check = src[indexes_no_minus_one],
ans_no_minus_one_check2 =
Index(src, indexes_no_minus_one, true, default_value);
ASSERT_TRUE(Equal(ans_no_minus_one, ans_no_minus_one_check));
ASSERT_TRUE(Equal(ans_no_minus_one, ans_no_minus_one_check2));
Array1<T> ans_minus_one =
Index(src, indexes_minus_one, true, default_value);
ans_minus_one = ans_minus_one.To(cpu_context);
src = src.To(cpu_context);
indexes_minus_one = indexes_minus_one.To(cpu_context);
for (int32_t i = 0; i < indexes_minus_one.Dim(); i++) {
int32_t index = indexes_minus_one[i];
ASSERT_EQ(ans_minus_one[i], (index < 0 ? default_value : src[index]));
}
}
}
TEST(OpsTest, InvertPermutationTest) {
for (int loop = 0; loop < 2; loop++) {
ContextPtr c = (loop == 0 ? GetCpuContext() : GetCudaContext()),
cpu_context = GetCpuContext();
for (int i = 0; i < 10; i++) {
int32_t len = RandInt(0, 10);
std::vector<int32_t> permutation(len);
std::iota(permutation.begin(), permutation.end(), 0);
      std::shuffle(permutation.begin(), permutation.end(),
                   std::mt19937(std::random_device{}()));
Array1<int32_t> permutation_array(c, permutation);
Array1<int32_t> permutation_array_inv =
InvertPermutation(permutation_array);
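      // indexing a permutation with its inverse should give the identity
      // mapping 0, 1, ..., len - 1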
Array1<int32_t> range = permutation_array[permutation_array_inv],
range2 = Range(c, len, 0);
K2_CHECK(Equal(range, range2));
}
}
}
TEST(OpsTest, Array2IndexTest) {
for (int loop = 0; loop < 2; loop++) {
ContextPtr c = (loop == 0 ? GetCpuContext() : GetCudaContext()),
cpu_context = GetCpuContext();
int32_t src_dim0 = RandInt(1, 10), src_dim1 = RandInt(1, 10),
ans_dim0 = RandInt(1, 10);
using T = int64_t;
Array2<T> src = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
Array1<int32_t> indexes_no_minus_one = RandUniformArray1<int32_t>(
c, ans_dim0, 0, src_dim0 - 1),
indexes_minus_one = RandUniformArray1<int32_t>(
c, ans_dim0, -1, src_dim0 - 1);
Array2<T> ans_no_minus_one = IndexRows(src, indexes_no_minus_one, false),
ans_no_minus_one_check =
IndexRows(src, indexes_no_minus_one, true);
ASSERT_TRUE(Equal(ans_no_minus_one, ans_no_minus_one_check));
Array2<T> ans_minus_one = IndexRows(src, indexes_minus_one, true);
ans_minus_one = ans_minus_one.To(cpu_context);
src = src.To(cpu_context);
indexes_minus_one = indexes_minus_one.To(cpu_context);
auto src_acc = src.Accessor(), ans_minus_one_acc = ans_minus_one.Accessor();
K2_LOG(INFO) << "src = " << src << ", indexes = " << indexes_minus_one
<< ", ans = " << ans_minus_one;
for (int32_t i = 0; i < ans_dim0; i++) {
int32_t index = indexes_minus_one[i];
for (int32_t j = 0; j < src_dim1; j++) {
ASSERT_EQ(ans_minus_one_acc(i, j), (index < 0 ? 0 : src_acc(index, j)));
}
}
}
}
template <typename T>
static void Array1SortTestSimple() {
std::vector<T> data = {3, 2, 5, 1};
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// with index map
Array1<T> array(context, data);
Array1<int32_t> index_map;
Sort(&array, &index_map);
CheckArrayData(array, std::vector<T>{1, 2, 3, 5});
CheckArrayData(index_map, std::vector<int32_t>{3, 1, 0, 2});
}
{
// without index map
Array1<T> array(context, data);
Sort(&array);
CheckArrayData(array, std::vector<T>{1, 2, 3, 5});
}
}
}
template <typename T>
static void Array1SortTestEmpty() {
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
Array1<T> array(context, 0);
Array1<int32_t> index_map;
Sort(&array, &index_map);
EXPECT_EQ(array.Dim(), 0);
EXPECT_EQ(index_map.Dim(), 0);
}
}
template <typename T>
static void Array1SortTestRandom() {
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
int32_t dim = RandInt(0, 10000);
int32_t min_value = RandInt(-1000, 1000);
int32_t max_value = min_value + RandInt(0, 3000);
{
// with index map
Array1<T> array =
RandUniformArray1<T>(context, dim, min_value, max_value);
Array1<T> data = array.Clone();
Array1<int32_t> index_map;
Sort(&array, &index_map);
array = array.To(GetCpuContext());
EXPECT_TRUE(std::is_sorted(array.Data(), array.Data() + array.Dim()));
index_map = index_map.To(GetCpuContext());
for (int32_t i = 0; i != array.Dim(); ++i)
EXPECT_EQ(array[i], data[index_map[i]]);
}
{
// without index_map
Array1<T> array =
RandUniformArray1<T>(context, dim, min_value, max_value);
Sort(&array);
array = array.To(GetCpuContext());
EXPECT_TRUE(std::is_sorted(array.Data(), array.Data() + array.Dim()));
}
}
}
TEST(OpsTest, Array1Sort) {
Array1SortTestSimple<int32_t>();
Array1SortTestSimple<float>();
Array1SortTestEmpty<int32_t>();
Array1SortTestEmpty<float>();
Array1SortTestRandom<int32_t>();
Array1SortTestRandom<float>();
}
TEST(OpsTest, Array2Assign) {
for (int loop = 0; loop < 10; loop++) {
ContextPtr c = ((loop % 2) == 0 ? GetCpuContext() : GetCudaContext());
int32_t src_dim0 = RandInt(1, 10), src_dim1 = RandInt(1, 10);
using T = int64_t;
Array2<T> src = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
Array2<T> dest = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
Assign(src, &dest);
K2_CHECK(Equal(src, dest));
ContextPtr c_other = ((loop % 2) != 0 ? GetCpuContext() : GetCudaContext());
Array2<T> dest2 = RandUniformArray2<T>(c_other, src_dim0, src_dim1, 0, 100);
if (src.ElemStride0() == src_dim1 && dest2.ElemStride0() == src_dim1) {
// test cross-device copy, which is only supported for contiguous input
Assign(src, &dest2);
K2_CHECK(Equal(src.To(c_other), dest2));
}
}
}
template <typename T>
void Array2ContiguousTest() {
for (int loop = 0; loop < 10; loop++) {
ContextPtr c = ((loop % 2) == 0 ? GetCpuContext() : GetCudaContext());
int32_t src_dim0 = RandInt(1, 10), src_dim1 = RandInt(1, 10);
Array2<T> src = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
int32_t slice_dim1_begin = RandInt(0, src_dim1),
slice_dim1_end = RandInt(slice_dim1_begin, src_dim1);
// Testing that ToContiguous() works the same with generic and specialized
// arrays.
Array2<T> src_part = src.ColArange(slice_dim1_begin, slice_dim1_end),
src_part_contiguous1 = ToContiguous(src_part);
Array2<Any> src_part_contiguous_generic2 = ToContiguous(src_part.Generic());
Array2<T> src_part_contiguous2 =
src_part_contiguous_generic2.Specialize<T>();
K2_CHECK_EQ(Equal(src_part_contiguous1, src_part_contiguous2),
true);
K2_CHECK_EQ(ApproxEqual(src_part_contiguous1, src_part_contiguous2),
true);
}
}
TEST(OpsTest, ApproxEqualTest) {
Array2<float> array1("[ [ 1 2 3 ] [4 5 6 ]]"),
array2("[ [ 1.1 2 3 ] [4 5 6 ]]");
K2_CHECK_EQ(ApproxEqual(array1, array2, 0.2f), true);
K2_CHECK_EQ(ApproxEqual(array1, array2, 0.01f), false);
}
TEST(OpsTest, Array2Contiguous) {
Array2ContiguousTest<int32_t>();
Array2ContiguousTest<double>();
}
TEST(OpsTest, SizesToMergeMapTest) {
// simple test
for (auto &c : {GetCpuContext(), GetCudaContext()}) {
{
std::vector<int32_t> sizes = {3, 5, 1};
Array1<uint32_t> merge_map = SizesToMergeMap(c, sizes);
std::vector<uint32_t> expected_map = {0, 3, 6, 1, 4, 7, 10, 13, 2};
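      // With num_srcs == 3, each entry appears to encode
      // src_index + (position within that src) * num_srcs, listed for the
      // sources in order; e.g. the second element of src 1 maps to
      // 1 + 1 * 3 == 4.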
CheckArrayData(merge_map, expected_map);
}
}
{
    // random case (well, I assume the CPU implementation is correct and only
    // test the CUDA implementation)
for (int32_t i = 0; i != 2; ++i) {
int32_t num_srcs = RandInt(0, 100);
std::vector<int32_t> sizes(num_srcs);
for (int32_t n = 0; n != num_srcs; ++n) sizes[n] = RandInt(0, 1000);
Array1<uint32_t> merge_map_cpu = SizesToMergeMap(GetCpuContext(), sizes);
Array1<uint32_t> merge_map = SizesToMergeMap(GetCudaContext(), sizes);
CheckArrayData(merge_map_cpu, merge_map);
}
}
}
template <typename T>
static T ComputeSum(const T *begin, const T *end) {
T s = T(0);
for (auto p = begin; p != end; ++p) s += *p;
return s;
}
template <typename T>
static void TestSum() {
std::random_device rd;
for (auto &c : {GetCpuContext(), GetCudaContext()}) {
uint64_t seed = rd();
SetSeed(c, seed);
Array1<T> empty(c, 0);
T s = Sum(empty);
EXPECT_EQ(s, T(0));
Array1<int32_t> n(c, 1);
Rand(c, 1, 1000, 1, n.Data());
Array1<T> src(c, n[0]);
Rand<T>(c, -1000, 1000, src.Dim(), src.Data());
s = Sum(src);
Array1<T> cpu_src = src.To(GetCpuContext());
T gt = ComputeSum(cpu_src.Data(), cpu_src.Data() + cpu_src.Dim());
    // relative error check; the 1e-6 term guards against division by zero
    // when the ground-truth sum is 0
    EXPECT_NEAR((std::abs(s - gt) / std::abs(gt + 1e-6)), 0, 1e-4);
}
}
TEST(OpsTest, Sum) {
TestSum<int32_t>();
TestSum<float>();
}
} // namespace k2
| 3e73dc2816601a57d0674101ccbfefad215b7056.cu | /**
* Copyright 2020 Xiaomi Corporation (authors: Haowen Qiu, Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cstdio>
#include <functional>
#include <iostream>
#include <limits>
#include <numeric>
#include <random>
#include <utility>
#include <vector>
#include "k2/csrc/array.h"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/context.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "k2/csrc/rand.h"
#include "k2/csrc/test_utils.h"
#include "k2/csrc/timer.h"
namespace k2 {
template <typename T>
void MatrixTranspose(int32_t num_rows, int32_t num_cols, const T *src,
                     T *dest) {
for (int32_t i = 0; i < num_rows; ++i) {
for (int32_t j = 0; j < num_cols; ++j) {
dest[j * num_rows + i] = src[i * num_cols + j];
}
}
}
template <typename T, DeviceType d>
void TestTranspose(int32_t num_rows, int32_t num_cols, int32_t num_reps = 1,
bool print_bandwidth = false) {
ContextPtr cpu = GetCpuContext(); // will use to copy data
ContextPtr context = nullptr;
if (d == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(d, kCuda);
context = GetCudaContext();
}
int32_t num_elements = num_rows * num_cols;
std::vector<T> host_src(num_elements);
std::iota(host_src.begin(), host_src.end(), 0);
std::vector<T> gold(num_elements);
  MatrixTranspose<T>(num_rows, num_cols, host_src.data(), gold.data());
int32_t num_bytes = num_elements * sizeof(T);
auto src_region = NewRegion(context, num_bytes);
Array2<T> src(num_rows, num_cols, num_cols, 0, src_region);
cpu->CopyDataTo(num_bytes, host_src.data(), src.Context(), src.Data());
auto dest_region = NewRegion(context, num_bytes);
Array2<T> dest(num_cols, num_rows, num_rows, 0, dest_region);
  // warm up, since the first kernel launch may take longer
Transpose<T>(context, src, &dest);
Timer t(dest.Context());
for (int32_t i = 0; i < num_reps; ++i) {
Transpose<T>(context, src, &dest);
}
double elapsed = t.Elapsed();
std::vector<T> host_dest(num_elements);
dest.Context()->CopyDataTo(num_bytes, dest.Data(), cpu, host_dest.data());
ASSERT_EQ(host_dest, gold);
if (print_bandwidth) {
// effective_bandwidth (GB/s) = (read_bytes + write_bytes) / (time_seconds *
// 10^9), for matrix transpose, read_bytes + write_bytes = 2 * num_bytes
printf("Average time is: %.6f s, effective bandwidth is: %.2f GB/s\n",
elapsed / num_reps, 2 * num_bytes * 1e-9 * num_reps / elapsed);
}
}
TEST(OpsTest, TransposeTest) {
{
// test with some corner cases
std::vector<std::pair<int32_t, int32_t>> shapes = {
{0, 0}, {1, 1}, {5, 4}, {100, 0}, {128, 64}, {15, 13}, {115, 180},
};
for (const auto &v : shapes) {
TestTranspose<int32_t, kCpu>(v.first, v.second);
TestTranspose<int32_t, kCuda>(v.first, v.second);
}
}
{
// test with random shapes
for (int32_t i = 0; i != 5; ++i) {
auto rows = RandInt(0, 3000);
auto cols = RandInt(0, 3000);
TestTranspose<int32_t, kCpu>(rows, cols);
TestTranspose<int32_t, kCuda>(rows, cols);
}
}
#ifdef K2_WITH_CUDA
{
// speed test for different data type
// TODO(haowen): we may need to allocate different size of shared memory for
// different data type to get the best performance
TestTranspose<char, kCuda>(1000, 2000, 100, true);
TestTranspose<int16_t, kCuda>(1000, 2000, 100, true);
TestTranspose<int32_t, kCuda>(1000, 2000, 100, true);
TestTranspose<float, kCuda>(1000, 2000, 100, true);
TestTranspose<double, kCuda>(1000, 2000, 100, true);
}
#endif
}
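// Reference implementation of an exclusive prefix sum:
// dest[i] = src[0] + ... + src[i - 1], with dest[0] = 0; dest may contain one
// more element than src.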
template <typename S, typename T>
void ComputeExclusiveSum(const std::vector<S> &src, std::vector<T> *dest) {
auto &dst = *dest;
K2_CHECK_GE(dst.size(), src.size());
T sum = T(0);
size_t dst_size = dst.size();
size_t src_size = src.size();
for (size_t i = 0; i != dst_size; ++i) {
dst[i] = sum;
if (i >= src_size) break;
sum += src[i];
}
}
template <typename S, typename T>
void CheckExclusiveSumArray1Result(const std::vector<S> &src_data,
const Array1<T> &dest) {
// copy data from CPU to CPU/GPU
std::vector<T> dest_data(dest.Dim());
dest.Context()->CopyDataTo(dest.Dim() * dest.ElementSize(), dest.Data(),
GetCpuContext(), dest_data.data());
std::vector<T> expected_data(dest.Dim());
ComputeExclusiveSum(src_data, &expected_data);
ASSERT_EQ(dest_data.size(), expected_data.size());
for (size_t i = 0; i != dest_data.size(); ++i) {
EXPECT_EQ(dest_data[i], expected_data[i]);
}
}
template <typename S, typename T>
void TestExclusiveSumArray1(int32_t num_elem) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// Test ExclusiveSum(Array1<T> &src)
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
Array1<S> dest = ExclusiveSum(src);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSum(Array1<S> &src, Array1<T> *dest) with
// dest.Dim() == src.Dim()
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
Array1<T> dest(context, num_elem);
ExclusiveSum(src, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSum(Array1<T> &src, Array1<T> *dest) where
// &dest = &src
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
ExclusiveSum(src, &src);
CheckExclusiveSumArray1Result(data, src);
}
{
// Test ExclusiveSum(Array1<T> &src, Array1<T> *dest) with
// dest.Dim() == src.Dim() + 1
int32_t src_dim = num_elem - 1;
std::vector<S> data(src_dim);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
// note we allocate one extra element in region for `src`,
// but its value will not be set.
int32_t num_bytes = num_elem * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
cpu->CopyDataTo(src_dim * sizeof(T), data.data(), region->context,
region->data);
Array1<S> src(src_dim, region, 0);
Array1<T> dest(context, num_elem);
ASSERT_EQ(dest.Dim(), src.Dim() + 1);
ExclusiveSum(src, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSumDeref(Array1<S*> &src, Array1<S> *dest) with
// dest.Dim() == src.Dim()
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
Array1<S> src(context, data);
S *src_data = src.Data();
Array1<const S *> src_ptr(context, num_elem);
const S **src_ptr_data = src_ptr.Data();
K2_EVAL(
context, num_elem, lambda_set_values,
(int32_t i)->void { src_ptr_data[i] = src_data + i; });
Array1<S> dest(context, num_elem);
ExclusiveSumDeref(src_ptr, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
{
// Test ExclusiveSumDeref(Array1<S*> &src, Array1<S> *dest) with
// dest.Dim() == src.Dim() + 1
int32_t src_dim = num_elem - 1;
std::vector<S> data(num_elem);
int32_t start = RandInt(0, 2);
std::iota(data.begin(), data.end(), static_cast<S>(start));
// note we allocate one extra element in region for `src`,
// but its value will not be set.
Array1<S> src(context, data);
S *src_data = src.Data();
int32_t num_bytes = num_elem * sizeof(S *);
RegionPtr region = NewRegion(context, num_bytes);
S **region_data = region->GetData<S *>();
K2_EVAL(
context, num_elem, lambda_set_values,
(int32_t i)->void { region_data[i] = &src_data[i]; });
      // note src_ptr.Dim() == src_dim == num_elem - 1
Array1<const S *> src_ptr(src_dim, region, 0);
Array1<S> dest(context, num_elem);
ASSERT_EQ(dest.Dim(), src_ptr.Dim() + 1);
ExclusiveSumDeref(src_ptr, &dest);
CheckExclusiveSumArray1Result(data, dest);
}
}
}
#ifndef _MSC_VER
// It results in the following error SOMETIMES on windows
//
// unknown file: error: SEH exception with code 0xc0000005 thrown in the test
// body.
//
// To make the CI happy, we just disable it for Windows.
TEST(OpsTest, ExclusiveSumArray1Test) {
TestExclusiveSumArray1<int32_t, int32_t>(1000);
TestExclusiveSumArray1<float, double>(1000);
}
#endif
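// Reference exclusive sum over a 2-D array: axis == 0 sums down each column,
// axis == 1 sums along each row; dest may have one extra row (axis == 0) or
// one extra column (axis == 1) compared with src.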
template <typename T>
void ComputeExclusiveSumArray2(const std::vector<T> &src, int32_t dest_rows,
                               int32_t dest_cols, std::vector<T> *dest,
int32_t axis) {
auto &dst = *dest;
int32_t src_num_elems = static_cast<int32_t>(src.size());
if (axis == 0) {
if (dst.size() > src.size()) {
// dst.rows == src.rows + 1
K2_CHECK_EQ((int32_t)src.size(), dest_cols * (dest_rows - 1));
}
for (int32_t j = 0; j != dest_cols; ++j) {
T sum = T(0);
for (auto i = 0; i != dest_rows; ++i) {
int32_t dest_pos = i * dest_cols + j;
dst[dest_pos] = sum;
int32_t src_pos = i * dest_cols + j; // src_cols == dest_cols
if (src_pos >= src_num_elems) break;
sum += src[src_pos];
}
}
} else {
K2_CHECK_EQ(axis, 1);
int32_t src_cols = dest_cols;
if (dst.size() > src.size()) {
// dst.cols == src.cols + 1
K2_CHECK_EQ((int32_t)src.size(), dest_rows * (dest_cols - 1));
src_cols = dest_cols - 1;
}
for (int32_t i = 0; i != dest_rows; ++i) {
T sum = T(0);
for (auto j = 0; j != dest_cols; ++j) {
int32_t dest_pos = i * dest_cols + j;
dst[dest_pos] = sum;
int32_t src_pos = i * src_cols + j;
if (src_pos >= src_num_elems) break;
sum += src[src_pos];
}
}
}
}
template <typename T>
void CheckExclusiveSumArray2Result(const std::vector<T> &src_data,
Array2<T> &dest, int32_t axis) {
int32_t dest_rows = dest.Dim0();
int32_t dest_cols = dest.Dim1();
  // keep the test code simple by calling `Flatten`, even though it's not very
  // efficient
Array1<T> dest_array1 = dest.Flatten();
// copy data from CPU to CPU/GPU
std::vector<T> dest_data(dest_rows * dest_cols);
dest_array1.Context()->CopyDataTo(
dest_array1.Dim() * dest_array1.ElementSize(), dest_array1.Data(),
GetCpuContext(), dest_data.data());
std::vector<T> expected_data(dest_rows * dest_cols);
ComputeExclusiveSumArray2(src_data, dest_rows, dest_cols, &expected_data,
axis);
EXPECT_EQ(dest_data, expected_data);
}
template <typename T>
void TestExclusiveSumArray2(int32_t rows, int32_t cols) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// axis == 0 && &dest == &src, ElementStride0 == cols
int32_t axis = 0;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 0 && &dest == &src, ElementStride0 > cols
int32_t axis = 0;
int32_t stride0 = RandInt(cols + 1, cols + 10);
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
      // allocate extra memory since stride0 > cols
int32_t num_bytes = rows * stride0 * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * stride0 + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, stride0, 0, region);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 0 && dest.Dim0() == src.Dim0(), ElementStride0 == cols
int32_t axis = 0;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
Array2<T> dest(context, rows, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// axis == 0 && dest.Dim0() == src.Dim0(), ElementStride0 > cols
int32_t axis = 0;
int32_t stride0 = RandInt(cols + 1, cols + 10);
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
      // allocate extra memory since stride0 > cols
int32_t num_bytes = rows * stride0 * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * stride0 + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, stride0, 0, region);
Array2<T> dest(context, rows, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// axis == 0 && dest.Dim0() == src.Dim0() + 1, we need to allocate one
// extra element for src
int32_t axis = 0;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
int32_t num_bytes = (rows * cols + 1) * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * cols + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, cols, 0, region);
{
// dest.stride0 == dest.cols
Array2<T> dest(context, rows + 1, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// dest.stride0 > dest.cols
int32_t dest_stride0 = cols + 5;
int32_t dest_rows = rows + 1;
RegionPtr dest_region =
NewRegion(context, dest_rows * dest_stride0 * sizeof(T));
Array2<T> dest(dest_rows, cols, dest_stride0, 0, dest_region);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
}
{
// axis == 1 && &dest == &src, ElementStride0 == cols
int32_t axis = 1;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 1 && &dest == &src, ElementStride0 > cols
int32_t axis = 1;
int32_t stride0 = RandInt(cols + 1, cols + 10);
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
      // allocate extra memory since stride0 > cols
int32_t num_bytes = rows * stride0 * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * stride0 + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, stride0, 0, region);
ExclusiveSum(src, &src, axis);
CheckExclusiveSumArray2Result(data, src, axis);
}
{
// axis == 1 && dest.Dim1() == src.Dim1(), ElementStride0 == cols
int32_t axis = 1;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> src_array1(context, data);
Array2<T> src(src_array1, rows, cols);
Array2<T> dest(context, rows, cols);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// axis == 1 && dest.Dim1() == src.Dim1() + 1, we need to allocate one
// extra element for src
int32_t axis = 1;
int32_t num_elems = rows * cols;
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> data_array(context, data);
const T *src_data = data_array.Data();
int32_t num_bytes = (rows * cols + 1) * sizeof(T);
RegionPtr region = NewRegion(context, num_bytes);
T *region_data = region->GetData<T>();
K2_EVAL2(
context, rows, cols, lambda_set_elems, (int32_t i, int32_t j)->void {
region_data[i * cols + j] = src_data[i * cols + j];
});
Array2<T> src(rows, cols, cols, 0, region);
{
// dest.stride0 == dest.cols
Array2<T> dest(context, rows, cols + 1);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
{
// dest.stride0 > dest.cols
int32_t dest_stride0 = cols + 5;
int32_t dest_cols = cols + 1;
RegionPtr dest_region =
NewRegion(context, rows * dest_stride0 * sizeof(T));
Array2<T> dest(rows, dest_cols, dest_stride0, 0, dest_region);
ExclusiveSum(src, &dest, axis);
CheckExclusiveSumArray2Result(data, dest, axis);
}
}
}
}
TEST(OpsTest, ExclusiveSumArray2Test) {
int32_t rows = RandInt(500, 1000);
int32_t cols = RandInt(500, 1000);
TestExclusiveSumArray2<int32_t>(rows, cols);
}
template <typename T>
void TestArrayMaxAndOr() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// Max
const std::vector<T> values = {1, 3, 2, 8, 0, -1};
Array1<T> src(context, values);
Array1<T> dst(context, 1);
T default_value = 0;
Max(src, default_value, &dst);
EXPECT_EQ(dst[0], 8);
}
{
// Max, dst is one of element of src
const std::vector<T> values = {1, 3, 2, 8, 0, -1};
Array1<T> src(context, values);
Array1<T> dst = src.Range(2, 1);
T default_value = 0;
Max(src, default_value, &dst);
EXPECT_EQ(dst[0], 8);
// src has been changed as well
EXPECT_EQ(src[2], 8);
// other values are not changed
src = src.To(cpu);
std::vector<T> cpu_data(src.Data(), src.Data() + src.Dim());
const std::vector<T> expected_data = {1, 3, 8, 8, 0, -1};
EXPECT_EQ(cpu_data, expected_data);
}
{
// Max, with random large size
int32_t num_elems = RandInt(1000, 10000);
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), num_elems);
      // randomly set one element to `max_value`
int32_t pos = RandInt(0, num_elems - 1);
T max_value = static_cast<T>(num_elems * 2);
data[pos] = max_value;
Array1<T> src(context, data);
Array1<T> dst(context, 1);
T default_value = 0;
Max(src, default_value, &dst);
EXPECT_EQ(dst[0], max_value);
}
{
// And
const std::vector<T> values = {3, 6, 11};
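      // bitwise AND of 3 (0b0011), 6 (0b0110) and 11 (0b1011) is 0b0010 == 2;
      // the default_value of -1 (all bits set) is the identity for AND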
Array1<T> src(context, values);
Array1<T> dst(context, 1);
T default_value = -1;
And(src, default_value, &dst);
EXPECT_EQ(dst[0], 2);
}
{
// Or
const std::vector<T> values = {3, 6, 4};
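      // bitwise OR of 3 (0b011), 6 (0b110) and 4 (0b100) is 0b111 == 7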
Array1<T> src(context, values);
Array1<T> dst(context, 1);
T default_value = 0;
Or(src, default_value, &dst);
EXPECT_EQ(dst[0], 7);
}
}
}
TEST(OpsTest, ArrayMaxAndOrTest) { TestArrayMaxAndOr<int32_t>(); }
template <typename T>
void TestCat() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// a case with small size
std::vector<T> data1 = {3, 1, 2};
std::vector<T> data2 = {5, 6, 7, 8};
std::vector<T> data3 = {}; // empty
std::vector<T> data4 = {9};
std::vector<T> expected_data = {3, 1, 2, 5, 6, 7, 8, 9};
Array1<T> array1(context, data1);
Array1<T> array2(context, data2);
Array1<T> array3(context, data3);
Array1<T> array4(context, data4);
{
// test Cat(int32_t, Array1<T>**)
std::vector<const Array1<T> *> arrays = {&array1, &array2, &array3,
&array4};
const Array1<T> **src = arrays.data();
Array1<T> dst = Cat(context, 4, src);
EXPECT_EQ(dst.Dim(), 8);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
EXPECT_EQ(cpu_data, expected_data);
}
{
// test Cat(int32_t, Array1<T>*)
std::vector<Array1<T>> arrays = {array1, array2, array3, array4};
const Array1<T> *src = arrays.data();
Array1<T> dst = Cat(context, 4, src);
EXPECT_EQ(dst.Dim(), 8);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
EXPECT_EQ(cpu_data, expected_data);
}
}
{
      // test with random large sizes; the arrays' sizes are fairly balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<T>> arrays_vec(num_array);
std::vector<const Array1<T> *> arrays(num_array);
int32_t total_size = 0;
for (int32_t j = 0; j != num_array; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
std::vector<T> data(curr_array_size);
std::iota(data.begin(), data.end(), total_size);
total_size += curr_array_size;
arrays_vec[j] = Array1<T>(context, data);
arrays[j] = &arrays_vec[j];
}
const Array1<T> **src = arrays.data();
Array1<T> dst = Cat(context, num_array, src);
EXPECT_EQ(dst.Dim(), total_size);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
std::vector<T> expected_data(dst.Dim());
std::iota(expected_data.begin(), expected_data.end(), 0);
EXPECT_EQ(cpu_data, expected_data);
}
}
{
// test with random large size: the arrays' sizes are not balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<T>> arrays_vec(num_array);
std::vector<const Array1<T> *> arrays(num_array);
int32_t total_size = 0, max_size = 0;
// notice `j != num_array - 1`, we would push a very long array
// after the loop
for (int32_t j = 0; j != num_array - 1; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
std::vector<T> data(curr_array_size);
std::iota(data.begin(), data.end(), total_size);
total_size += curr_array_size;
arrays_vec[j] = Array1<T>(context, data);
arrays[j] = &arrays_vec[j];
if (curr_array_size > max_size) max_size = curr_array_size;
}
// generate an array with very large size
{
int32_t average_size = total_size / num_array;
int32_t long_size = average_size * 10;
std::vector<T> data(long_size);
std::iota(data.begin(), data.end(), total_size);
total_size += long_size;
arrays_vec[num_array - 1] = Array1<T>(context, data);
arrays[num_array - 1] = &arrays_vec[num_array - 1];
}
const Array1<T> **src = arrays.data();
Array1<T> dst = Cat(context, num_array, src);
EXPECT_EQ(dst.Dim(), total_size);
// copy memory from GPU/CPU to CPU
std::vector<T> cpu_data(dst.Dim());
dst.Context()->CopyDataTo(dst.Dim() * dst.ElementSize(), dst.Data(),
cpu, cpu_data.data());
std::vector<T> expected_data(dst.Dim());
std::iota(expected_data.begin(), expected_data.end(), 0);
EXPECT_EQ(cpu_data, expected_data);
}
}
}
}
TEST(OpsTest, CatTest) {
TestCat<int32_t>();
TestCat<float>();
}
TEST(OpsTest, CatWithOffsets) {
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// a case with small size
std::vector<int32_t> data1 = {3, 1, 2};
std::vector<int32_t> data2 = {5, 6, 7, 8};
std::vector<int32_t> data3 = {}; // empty
std::vector<int32_t> data4 = {9};
std::vector<int32_t> data_offsets = {1, 2, 3, 4};
std::vector<int32_t> expected_data = {4, 2, 3, 7, 8, 9, 10, 13};
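      // each source array is shifted by its offset before concatenation,
      // e.g. data2 + 2 gives {7, 8, 9, 10}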
Array1<int32_t> array1(context, data1);
Array1<int32_t> array2(context, data2);
Array1<int32_t> array3(context, data3);
Array1<int32_t> array4(context, data4);
Array1<int32_t> offsets(context, data_offsets);
std::vector<const Array1<int32_t> *> arrays = {&array1, &array2, &array3,
&array4};
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = CatWithOffsets(offsets, src);
CheckArrayData(dst, expected_data);
}
{
// test with random large size
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
Array1<int32_t> offsets = RandUniformArray1(GetCpuContext(), num_array,
-num_array, num_array);
const int32_t *offsets_data = offsets.Data();
std::vector<int32_t> expected_data;
std::vector<Array1<int32_t>> arrays_vec(num_array);
std::vector<const Array1<int32_t> *> arrays(num_array);
int32_t total_size = 0;
for (int32_t j = 0; j != num_array; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
std::vector<int32_t> data(curr_array_size);
std::iota(data.begin(), data.end(), total_size);
arrays_vec[j] = Array1<int32_t>(context, data);
arrays[j] = &arrays_vec[j];
std::vector<int32_t> curr_expected_data(curr_array_size);
std::iota(curr_expected_data.begin(), curr_expected_data.end(),
total_size + offsets_data[j]);
expected_data.insert(expected_data.end(), curr_expected_data.begin(),
curr_expected_data.end());
total_size += curr_array_size;
}
const Array1<int32_t> **src = arrays.data();
offsets = offsets.To(context);
Array1<int32_t> dst = CatWithOffsets(offsets, src);
CheckArrayData(dst, expected_data);
}
}
}
}
TEST(OpsTest, SpliceRowSplitsTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// a case with small size
std::vector<int32_t> data1 = {0, 2, 5};
std::vector<int32_t> data2 = {0, 2, 2, 3};
std::vector<int32_t> data3 = {0};
std::vector<int32_t> data4 = {0, 3};
std::vector<int32_t> expected_data = {0, 2, 5, 7, 7, 8, 11};
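      // splicing row_splits concatenates the underlying ragged rows: every
      // vector after the first is shifted by the running element count and
      // its leading 0 is dropped, e.g. data2 becomes {7, 7, 8}.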
Array1<int32_t> array1(context, data1);
Array1<int32_t> array2(context, data2);
Array1<int32_t> array3(context, data3);
Array1<int32_t> array4(context, data4);
std::vector<const Array1<int32_t> *> arrays = {&array1, &array2, &array3,
&array4};
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = SpliceRowSplits(4, src);
EXPECT_EQ(dst.Dim(), expected_data.size());
// copy memory from GPU/CPU to CPU
dst = dst.To(cpu);
std::vector<int32_t> cpu_data(dst.Data(), dst.Data() + dst.Dim());
EXPECT_EQ(cpu_data, expected_data);
}
{
      // test with random large sizes; the arrays' sizes are fairly balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<int32_t>> arrays_vec(num_array);
std::vector<const Array1<int32_t> *> arrays(num_array);
std::vector<int32_t> expected_data;
int32_t data_offset = 0;
for (int32_t j = 0; j != num_array; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
RaggedShape shape =
RandomRaggedShape(false, 2, 2, curr_array_size, curr_array_size);
ASSERT_EQ(shape.NumAxes(), 2);
Array1<int32_t> cpu_row_splits = shape.RowSplits(1).To(cpu);
int32_t num_splits = cpu_row_splits.Dim();
ASSERT_GE(num_splits, 1);
const int32_t *splits_data = cpu_row_splits.Data();
for (int32_t n = 0; n < num_splits; ++n) {
expected_data.push_back(splits_data[n] + data_offset);
}
if (j + 1 < num_array) expected_data.pop_back();
data_offset += splits_data[num_splits - 1];
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
ASSERT_GE(row_splits.Dim(), 1);
arrays_vec[j] = row_splits;
arrays[j] = &arrays_vec[j];
}
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = SpliceRowSplits(num_array, src);
EXPECT_EQ(dst.Dim(), expected_data.size());
// copy memory from GPU/CPU to CPU
dst = dst.To(cpu);
std::vector<int32_t> cpu_data(dst.Data(), dst.Data() + dst.Dim());
EXPECT_EQ(cpu_data, expected_data);
}
}
{
// test with random large size: the arrays' sizes are not balanced.
for (int32_t i = 0; i != 2; ++i) {
int32_t num_array = RandInt(10, 1000);
std::vector<Array1<int32_t>> arrays_vec(num_array);
std::vector<const Array1<int32_t> *> arrays(num_array);
std::vector<int32_t> expected_data;
int32_t data_offset = 0;
int32_t max_size = 0;
for (int32_t j = 0; j != num_array - 1; ++j) {
int32_t curr_array_size = RandInt(0, 10000);
RaggedShape shape =
RandomRaggedShape(false, 2, 2, curr_array_size, curr_array_size);
ASSERT_EQ(shape.NumAxes(), 2);
Array1<int32_t> cpu_row_splits = shape.RowSplits(1).To(cpu);
int32_t num_splits = cpu_row_splits.Dim();
ASSERT_GE(num_splits, 1);
const int32_t *splits_data = cpu_row_splits.Data();
for (int32_t n = 0; n < num_splits; ++n) {
expected_data.push_back(splits_data[n] + data_offset);
}
expected_data.pop_back();
data_offset += splits_data[num_splits - 1];
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
ASSERT_GE(row_splits.Dim(), 1);
arrays_vec[j] = row_splits;
arrays[j] = &arrays_vec[j];
if (num_splits > max_size) max_size = num_splits;
}
// generate an array with very large size
{
int32_t total_size = static_cast<int32_t>(expected_data.size());
int32_t average_size = total_size / num_array;
int32_t long_size = average_size * 10;
RaggedShape shape =
RandomRaggedShape(false, 2, 2, long_size, long_size);
ASSERT_EQ(shape.NumAxes(), 2);
Array1<int32_t> cpu_row_splits = shape.RowSplits(1).To(cpu);
int32_t num_splits = cpu_row_splits.Dim();
ASSERT_GE(num_splits, 1);
const int32_t *splits_data = cpu_row_splits.Data();
for (int32_t n = 0; n < num_splits; ++n) {
expected_data.push_back(splits_data[n] + data_offset);
}
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
ASSERT_GE(row_splits.Dim(), 1);
arrays_vec[num_array - 1] = row_splits;
arrays[num_array - 1] = &arrays_vec[num_array - 1];
}
const Array1<int32_t> **src = arrays.data();
Array1<int32_t> dst = SpliceRowSplits(num_array, src);
EXPECT_EQ(dst.Dim(), expected_data.size());
// copy memory from GPU/CPU to CPU
dst = dst.To(cpu);
std::vector<int32_t> cpu_data(dst.Data(), dst.Data() + dst.Dim());
EXPECT_EQ(cpu_data, expected_data);
}
}
}
}
template <typename T>
void TestRangeAndRandomArray1() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// test Range with small size
Array1<T> result = Range<T>(context, 6, 3, 2);
const std::vector<T> values = {3, 5, 7, 9, 11, 13};
result = result.To(cpu);
std::vector<T> cpu_data(result.Data(), result.Data() + result.Dim());
EXPECT_EQ(cpu_data, values);
}
{
// test Range with random large size
int32_t num_elems = RandInt(1000, 10000);
std::vector<T> data(num_elems);
std::iota(data.begin(), data.end(), 0);
Array1<T> result = Range<T>(context, num_elems, 0);
result = result.To(cpu);
std::vector<T> cpu_data(result.Data(), result.Data() + result.Dim());
EXPECT_EQ(cpu_data, data);
}
{
// test RandUniformArray1
Array1<T> result = RandUniformArray1<T>(context, 1000, 0, 10000);
result = result.To(cpu);
}
}
}
TEST(OpsTest, RangeTest) {
TestRangeAndRandomArray1<int32_t>();
TestRangeAndRandomArray1<float>();
TestRangeAndRandomArray1<double>();
}
TEST(OpsTest, ValidateRowSplitsAndIdsTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// test RowSplitsToRowIds and RowIdsToRowSplits
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
{
Array1<int32_t> row_splits(context, row_splits_vec);
Array1<int32_t> row_ids(context, row_ids_vec.size());
RowSplitsToRowIds(row_splits, &row_ids);
row_ids = row_ids.To(cpu);
std::vector<int32_t> cpu_data(row_ids.Data(),
row_ids.Data() + row_ids.Dim());
EXPECT_EQ(cpu_data, row_ids_vec);
}
{
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec.size());
RowIdsToRowSplits(row_ids, &row_splits);
row_splits = row_splits.To(cpu);
std::vector<int32_t> cpu_data(row_splits.Data(),
row_splits.Data() + row_splits.Dim());
EXPECT_EQ(cpu_data, row_splits_vec);
}
}
{
// empty case for row splits and row ids
const std::vector<int32_t> row_splits_vec;
const std::vector<int32_t> row_ids_vec;
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// valid case for row splits and row ids
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_TRUE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// valid case for row splits and row ids with random size
for (int32_t i = 0; i != 5; ++i) {
RaggedShape shape = RandomRaggedShape(true, 2, 2, 2000, 10000);
ASSERT_EQ(shape.NumAxes(), 2);
// note shape is on CPU
Array1<int32_t> row_splits = shape.RowSplits(1).To(context);
Array1<int32_t> row_ids = shape.RowIds(1).To(context);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_TRUE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
}
{
// provided tmp storage
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
{
Array1<int32_t> tmp(context, 3, 2);
EXPECT_TRUE(ValidateRowSplits(row_splits, &tmp));
        // check elements
tmp = tmp.To(cpu);
std::vector<int32_t> cpu_data(tmp.Data(), tmp.Data() + tmp.Dim());
EXPECT_THAT(cpu_data, ::testing::ElementsAre(0, 2, 2));
}
{
Array1<int32_t> tmp(context, 3, 2);
EXPECT_TRUE(ValidateRowIds(row_ids, &tmp));
        // check elements
tmp = tmp.To(cpu);
std::vector<int32_t> cpu_data(tmp.Data(), tmp.Data() + tmp.Dim());
EXPECT_THAT(cpu_data, ::testing::ElementsAre(0, 2, 2));
}
{
Array1<int32_t> tmp(context, 3, 2);
EXPECT_TRUE(ValidateRowSplitsAndIds(row_splits, row_ids, &tmp));
        // check elements
tmp = tmp.To(cpu);
std::vector<int32_t> cpu_data(tmp.Data(), tmp.Data() + tmp.Dim());
EXPECT_THAT(cpu_data, ::testing::ElementsAre(0, 2, 2));
}
}
{
      // bad case for row splits: does not start with 0
const std::vector<int32_t> row_splits_vec = {1, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case for row splits, contains negative value
const std::vector<int32_t> row_splits_vec = {0, 2, 3, -5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case for row splits, not non-decreasing
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 1, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_FALSE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case row ids, contains negative value
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, -2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_FALSE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
    // bad case for row ids, not non-decreasing
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 6, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_FALSE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case row ids and row splits don't agree with each other
// i < row_splits[row_ids[i]]
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 8, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
    // another bad case where row ids and row splits don't agree with each
    // other: i >= row_splits[row_ids[i] + 1]
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 5, 7, 7, 9};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
{
// bad case for row ids, num_elems != row_splits[-1]
const std::vector<int32_t> row_splits_vec = {0, 2, 3, 5, 8, 9,
12, 13, 15, 15, 16};
const std::vector<int32_t> row_ids_vec = {0, 0, 1, 2, 2, 3, 3, 3,
4, 5, 5, 5, 6, 7, 7};
Array1<int32_t> row_ids(context, row_ids_vec);
Array1<int32_t> row_splits(context, row_splits_vec);
EXPECT_TRUE(ValidateRowSplits(row_splits));
EXPECT_TRUE(ValidateRowIds(row_ids));
EXPECT_FALSE(ValidateRowSplitsAndIds(row_splits, row_ids));
}
}
}
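// GetCounts(src, n) returns an Array1 of length n whose k-th entry is the number
// of occurrences of the value k in src; src values are expected to lie in [0, n).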
TEST(OpsTest, GetCountsTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
int32_t n = 0;
std::vector<int32_t> values;
Array1<int32_t> src(context, values);
Array1<int32_t> ans = GetCounts(src, n);
EXPECT_EQ(ans.Dim(), 0);
}
{
// simple case
int32_t n = 8;
std::vector<int32_t> values = {0, 1, 2, 1, 5, 5, 7, 6, 3, 2};
std::vector<int32_t> expected_data = {1, 2, 2, 1, 0, 2, 1, 1};
Array1<int32_t> src(context, values);
Array1<int32_t> ans = GetCounts(src, n);
ans = ans.To(cpu);
std::vector<int32_t> data(ans.Data(), ans.Data() + ans.Dim());
EXPECT_EQ(data, expected_data);
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 10000);
int32_t src_dim = RandInt(0, 10000);
Array1<int32_t> src = RandUniformArray1(context, src_dim, 0, n - 1);
Array1<int32_t> ans = GetCounts(src, n);
ans = ans.To(cpu);
std::vector<int32_t> data(ans.Data(), ans.Data() + ans.Dim());
src = src.To(cpu);
int32_t *src_data = src.Data();
std::vector<int32_t> expected_data(n, 0);
for (int32_t j = 0; j < src.Dim(); ++j) {
++expected_data[src_data[j]];
}
EXPECT_EQ(data, expected_data);
}
}
}
}
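// MonotonicLowerBound appears to compute, for each position i, the minimum of
// src[i..end) (i.e. the tightest non-decreasing lower bound of src), judging by
// the reference loop in the random-case check below.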
template <typename S, typename T>
void TestMonotonicLowerBound() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
std::vector<S> values;
Array1<S> src(context, values);
Array1<T> dest(context, 0);
MonotonicLowerBound(src, &dest);
EXPECT_EQ(dest.Dim(), 0);
}
{
// simple case
std::vector<S> values = {2, 1, 3, 7, 5, 8, 20, 15};
std::vector<T> expected_data = {1, 1, 3, 5, 5, 8, 15, 15};
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
Array1<T> dest(context, static_cast<int32_t>(values.size()));
MonotonicLowerBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
EXPECT_EQ(data, expected_data);
}
{
// simple case with dest = &src
std::vector<S> values = {2, 1, 3, 7, 5, 8, 20, 15};
std::vector<T> expected_data = {1, 1, 3, 5, 5, 8, 15, 15};
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
MonotonicLowerBound(src, &src);
src = src.To(cpu);
std::vector<T> data(src.Data(), src.Data() + src.Dim());
EXPECT_EQ(data, expected_data);
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 10000);
int32_t src_dim = RandInt(0, 10000);
Array1<S> src = RandUniformArray1(context, src_dim, 0, n - 1);
Array1<T> dest(context, src_dim);
MonotonicLowerBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
src = src.To(cpu);
int32_t *src_data = src.Data();
S min_value = std::numeric_limits<S>::max();
std::vector<T> expected_data(src_dim);
for (int32_t i = src_dim - 1; i >= 0; --i) {
min_value = std::min(src_data[i], min_value);
expected_data[i] = min_value;
}
EXPECT_EQ(data, expected_data);
}
}
}
}
TEST(OpsTest, MonotonicLowerBoundTest) {
TestMonotonicLowerBound<int32_t, int32_t>();
TestMonotonicLowerBound<int32_t, double>();
}
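// MonotonicDecreasingUpperBound is the mirror image: judging by the reference
// loop below, it computes for each position i the maximum of src[i..end), i.e.
// the tightest non-increasing upper bound of src.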
template <typename S, typename T>
void TestMonotonicDecreasingUpperBound() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
std::vector<S> values;
Array1<S> src(context, values);
Array1<T> dest(context, 0);
MonotonicDecreasingUpperBound(src, &dest);
EXPECT_EQ(dest.Dim(), 0);
}
{
// simple case
std::vector<S> values = {10, 7, 3, 5, 4, 1, 0, 2};
std::vector<T> expected_data = {10, 7, 5, 5, 4, 2, 2, 2};
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
Array1<T> dest(context, static_cast<int32_t>(values.size()));
MonotonicDecreasingUpperBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
EXPECT_EQ(data, expected_data);
}
{
// simple case with dest = &src
std::vector<S> values = {10, 7, 3, 5, 4, 1, 0, 2};
std::vector<T> expected_data = {10, 7, 5, 5, 4, 2, 2, 2};
ASSERT_EQ(values.size(), expected_data.size());
Array1<S> src(context, values);
MonotonicDecreasingUpperBound(src, &src);
src = src.To(cpu);
std::vector<T> data(src.Data(), src.Data() + src.Dim());
EXPECT_EQ(data, expected_data);
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 10000);
int32_t src_dim = RandInt(0, 10000);
Array1<S> src = RandUniformArray1(context, src_dim, 0, n - 1);
Array1<T> dest(context, src_dim);
MonotonicDecreasingUpperBound(src, &dest);
dest = dest.To(cpu);
std::vector<T> data(dest.Data(), dest.Data() + dest.Dim());
src = src.To(cpu);
int32_t *src_data = src.Data();
S max_value = std::numeric_limits<S>::min();
std::vector<T> expected_data(src_dim);
for (int32_t i = src_dim - 1; i >= 0; --i) {
max_value = std::max(src_data[i], max_value);
expected_data[i] = max_value;
}
EXPECT_EQ(data, expected_data);
}
}
}
}
TEST(OpsTest, MonotonicDecreasingUpperBoundTest) {
TestMonotonicDecreasingUpperBound<int32_t, int32_t>();
TestMonotonicDecreasingUpperBound<int32_t, double>();
}
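// InvertMonotonicDecreasing appears to map a monotonically decreasing src to a
// dest of src[0] elements with dest[j] = number of entries of src greater than j;
// applying it twice recovers src, which is what the test below checks.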
TEST(OpsTest, InvertMonotonicDecreasingTest) {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// empty case
std::vector<int32_t> values;
Array1<int32_t> src(context, values);
Array1<int32_t> dest = InvertMonotonicDecreasing(src);
EXPECT_EQ(dest.Dim(), 0);
}
{
// simple case
std::vector<int32_t> values = {6, 4, 4, 2};
Array1<int32_t> src(context, values);
Array1<int32_t> dest = InvertMonotonicDecreasing(src);
EXPECT_EQ(dest.Dim(), 6);
dest = dest.To(cpu);
std::vector<int32_t> data(dest.Data(), dest.Data() + dest.Dim());
std::vector<int32_t> expected_data = {4, 4, 3, 3, 1, 1};
EXPECT_EQ(data, expected_data);
// convert back
dest = dest.To(context);
Array1<int32_t> src1 = InvertMonotonicDecreasing(dest);
EXPECT_TRUE(Equal(src1, src));
}
{
// random large case
for (int32_t i = 0; i != 2; ++i) {
int32_t n = RandInt(1, 1000);
int32_t src_dim = RandInt(0, 1000);
Array1<int32_t> src = RandUniformArray1(context, src_dim, 1, n);
Sort<int32_t, GreaterThan<int32_t>>(&src);
Array1<int32_t> dest = InvertMonotonicDecreasing(src);
// convert back
Array1<int32_t> src1 = InvertMonotonicDecreasing(dest);
EXPECT_TRUE(Equal(src1, src));
}
}
}
}
template <typename T>
void ArrayPlusTest() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
for (int32_t i = 0; i != 2; ++i) {
{
// normal case
int32_t dim = RandInt(0, 1000);
Array1<T> src1 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src2 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> dest(context, dim);
Plus(src1, src2, &dest);
Array1<T> ans = Plus(src1, src2);
EXPECT_EQ(ans.Dim(), dim);
src1.To(cpu);
src2.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src1[n] + src2[n];
}
CheckArrayData(dest, expected);
CheckArrayData(ans, expected);
}
{
// special case: &src1 == &src2 == dest
int32_t dim = RandInt(0, 1000);
Array1<T> src = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src_copy = src.Clone();
Plus(src, src, &src);
src_copy.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src_copy[n] + src_copy[n];
}
CheckArrayData(src, expected);
}
}
}
}
TEST(OpsTest, PlusTest) {
ArrayPlusTest<int32_t>();
ArrayPlusTest<float>();
}
template <typename T>
void ArrayMinusTest() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
for (int32_t i = 0; i != 2; ++i) {
{
// normal case
int32_t dim = RandInt(0, 1000);
Array1<T> src1 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src2 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> dest(context, dim);
Minus(src1, src2, &dest);
Array1<T> ans = Minus(src1, src2);
EXPECT_EQ(ans.Dim(), dim);
src1.To(cpu);
src2.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src1[n] - src2[n];
}
CheckArrayData(dest, expected);
CheckArrayData(ans, expected);
}
{
// special case: &src1 == &src2 == dest
int32_t dim = RandInt(0, 1000);
Array1<T> src = RandUniformArray1<T>(context, dim, 0, 1000);
Minus(src, src, &src);
Array1<T> expected(context, dim, T(0));
CheckArrayData(src, expected);
}
}
}
}
TEST(OpsTest, MinusTest) {
ArrayMinusTest<int32_t>();
ArrayMinusTest<float>();
}
template <typename T>
void ArrayTimesTest() {
ContextPtr cpu = GetCpuContext(); // will be used to copy data
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
for (int32_t i = 0; i != 2; ++i) {
{
// normal case
int32_t dim = RandInt(0, 1000);
Array1<T> src1 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src2 = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> dest(context, dim);
Times(src1, src2, &dest);
Array1<T> ans = Times(src1, src2);
EXPECT_EQ(ans.Dim(), dim);
src1.To(cpu);
src2.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src1[n] * src2[n];
}
CheckArrayData(dest, expected);
CheckArrayData(ans, expected);
}
{
// special case: &src1 == &src2 == dest
int32_t dim = RandInt(0, 1000);
Array1<T> src = RandUniformArray1<T>(context, dim, 0, 1000);
Array1<T> src_copy = src.Clone();
Times(src, src, &src);
src_copy.To(cpu);
Array1<T> expected(cpu, dim);
T *expected_data = expected.Data();
for (int32_t n = 0; n != dim; ++n) {
expected_data[n] = src_copy[n] * src_copy[n];
}
CheckArrayData(src, expected);
}
}
}
}
TEST(OpsTest, TimesTest) {
ArrayTimesTest<int32_t>();
ArrayTimesTest<float>();
}
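// Index(src, indexes, allow_minus_one, default_value): judging by the checks
// below, an index of -1 (permitted when the third argument is true) produces
// default_value in the output, while any other index i gathers src[i].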
TEST(OpsTest, Array1IndexTest) {
for (int loop = 0; loop < 2; loop++) {
ContextPtr c = (loop == 0 ? GetCpuContext() : GetCudaContext()),
cpu_context = GetCpuContext();
int32_t src_dim = RandInt(1, 10), ans_dim = RandInt(1, 10);
using T = int64_t;
Array1<T> src = RandUniformArray1<T>(c, src_dim, 0, 100);
Array1<int32_t> indexes_no_minus_one =
RandUniformArray1<int32_t>(c, ans_dim, 0, src_dim - 1),
indexes_minus_one =
RandUniformArray1<int32_t>(c, ans_dim, -1, src_dim - 1);
T default_value = loop - 1;
Array1<T> ans_no_minus_one =
Index(src, indexes_no_minus_one, false, default_value),
ans_no_minus_one_check = src[indexes_no_minus_one],
ans_no_minus_one_check2 =
Index(src, indexes_no_minus_one, true, default_value);
ASSERT_TRUE(Equal(ans_no_minus_one, ans_no_minus_one_check));
ASSERT_TRUE(Equal(ans_no_minus_one, ans_no_minus_one_check2));
Array1<T> ans_minus_one =
Index(src, indexes_minus_one, true, default_value);
ans_minus_one = ans_minus_one.To(cpu_context);
src = src.To(cpu_context);
indexes_minus_one = indexes_minus_one.To(cpu_context);
for (int32_t i = 0; i < indexes_minus_one.Dim(); i++) {
int32_t index = indexes_minus_one[i];
ASSERT_EQ(ans_minus_one[i], (index < 0 ? default_value : src[index]));
}
}
}
TEST(OpsTest, InvertPermutationTest) {
for (int loop = 0; loop < 2; loop++) {
ContextPtr c = (loop == 0 ? GetCpuContext() : GetCudaContext()),
cpu_context = GetCpuContext();
for (int i = 0; i < 10; i++) {
int32_t len = RandInt(0, 10);
std::vector<int32_t> permutation(len);
std::iota(permutation.begin(), permutation.end(), 0);
std::random_shuffle(permutation.begin(), permutation.end());
Array1<int32_t> permutation_array(c, permutation);
Array1<int32_t> permutation_array_inv =
InvertPermutation(permutation_array);
Array1<int32_t> range = permutation_array[permutation_array_inv],
range2 = Range(c, len, 0);
K2_CHECK(Equal(range, range2));
}
}
}
TEST(OpsTest, Array2IndexTest) {
for (int loop = 0; loop < 2; loop++) {
ContextPtr c = (loop == 0 ? GetCpuContext() : GetCudaContext()),
cpu_context = GetCpuContext();
int32_t src_dim0 = RandInt(1, 10), src_dim1 = RandInt(1, 10),
ans_dim0 = RandInt(1, 10);
using T = int64_t;
Array2<T> src = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
Array1<int32_t> indexes_no_minus_one = RandUniformArray1<int32_t>(
c, ans_dim0, 0, src_dim0 - 1),
indexes_minus_one = RandUniformArray1<int32_t>(
c, ans_dim0, -1, src_dim0 - 1);
Array2<T> ans_no_minus_one = IndexRows(src, indexes_no_minus_one, false),
ans_no_minus_one_check =
IndexRows(src, indexes_no_minus_one, true);
ASSERT_TRUE(Equal(ans_no_minus_one, ans_no_minus_one_check));
Array2<T> ans_minus_one = IndexRows(src, indexes_minus_one, true);
ans_minus_one = ans_minus_one.To(cpu_context);
src = src.To(cpu_context);
indexes_minus_one = indexes_minus_one.To(cpu_context);
auto src_acc = src.Accessor(), ans_minus_one_acc = ans_minus_one.Accessor();
K2_LOG(INFO) << "src = " << src << ", indexes = " << indexes_minus_one
<< ", ans = " << ans_minus_one;
for (int32_t i = 0; i < ans_dim0; i++) {
int32_t index = indexes_minus_one[i];
for (int32_t j = 0; j < src_dim1; j++) {
ASSERT_EQ(ans_minus_one_acc(i, j), (index < 0 ? 0 : src_acc(index, j)));
}
}
}
}
template <typename T>
static void Array1SortTestSimple() {
std::vector<T> data = {3, 2, 5, 1};
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
{
// with index map
Array1<T> array(context, data);
Array1<int32_t> index_map;
Sort(&array, &index_map);
CheckArrayData(array, std::vector<T>{1, 2, 3, 5});
CheckArrayData(index_map, std::vector<int32_t>{3, 1, 0, 2});
}
{
// without index map
Array1<T> array(context, data);
Sort(&array);
CheckArrayData(array, std::vector<T>{1, 2, 3, 5});
}
}
}
template <typename T>
static void Array1SortTestEmpty() {
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
Array1<T> array(context, 0);
Array1<int32_t> index_map;
Sort(&array, &index_map);
EXPECT_EQ(array.Dim(), 0);
EXPECT_EQ(index_map.Dim(), 0);
}
}
template <typename T>
static void Array1SortTestRandom() {
for (auto &context : {GetCpuContext(), GetCudaContext()}) {
int32_t dim = RandInt(0, 10000);
int32_t min_value = RandInt(-1000, 1000);
int32_t max_value = min_value + RandInt(0, 3000);
{
// with index map
Array1<T> array =
RandUniformArray1<T>(context, dim, min_value, max_value);
Array1<T> data = array.Clone();
Array1<int32_t> index_map;
Sort(&array, &index_map);
array = array.To(GetCpuContext());
EXPECT_TRUE(std::is_sorted(array.Data(), array.Data() + array.Dim()));
index_map = index_map.To(GetCpuContext());
for (int32_t i = 0; i != array.Dim(); ++i)
EXPECT_EQ(array[i], data[index_map[i]]);
}
{
// without index_map
Array1<T> array =
RandUniformArray1<T>(context, dim, min_value, max_value);
Sort(&array);
array = array.To(GetCpuContext());
EXPECT_TRUE(std::is_sorted(array.Data(), array.Data() + array.Dim()));
}
}
}
TEST(OpsTest, Array1Sort) {
Array1SortTestSimple<int32_t>();
Array1SortTestSimple<float>();
Array1SortTestEmpty<int32_t>();
Array1SortTestEmpty<float>();
Array1SortTestRandom<int32_t>();
Array1SortTestRandom<float>();
}
TEST(OpsTest, Array2Assign) {
for (int loop = 0; loop < 10; loop++) {
ContextPtr c = ((loop % 2) == 0 ? GetCpuContext() : GetCudaContext());
int32_t src_dim0 = RandInt(1, 10), src_dim1 = RandInt(1, 10);
using T = int64_t;
Array2<T> src = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
Array2<T> dest = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
Assign(src, &dest);
K2_CHECK(Equal(src, dest));
ContextPtr c_other = ((loop % 2) != 0 ? GetCpuContext() : GetCudaContext());
Array2<T> dest2 = RandUniformArray2<T>(c_other, src_dim0, src_dim1, 0, 100);
if (src.ElemStride0() == src_dim1 && dest2.ElemStride0() == src_dim1) {
// test cross-device copy, which is only supported for contiguous input
Assign(src, &dest2);
K2_CHECK(Equal(src.To(c_other), dest2));
}
}
}
template <typename T>
void Array2ContiguousTest() {
for (int loop = 0; loop < 10; loop++) {
ContextPtr c = ((loop % 2) == 0 ? GetCpuContext() : GetCudaContext());
int32_t src_dim0 = RandInt(1, 10), src_dim1 = RandInt(1, 10);
Array2<T> src = RandUniformArray2<T>(c, src_dim0, src_dim1, 0, 100);
int32_t slice_dim1_begin = RandInt(0, src_dim1),
slice_dim1_end = RandInt(slice_dim1_begin, src_dim1);
// Testing that ToContiguous() works the same with generic and specialized
// arrays.
Array2<T> src_part = src.ColArange(slice_dim1_begin, slice_dim1_end),
src_part_contiguous1 = ToContiguous(src_part);
Array2<Any> src_part_contiguous_generic2 = ToContiguous(src_part.Generic());
Array2<T> src_part_contiguous2 =
src_part_contiguous_generic2.Specialize<T>();
K2_CHECK_EQ(Equal(src_part_contiguous1, src_part_contiguous2),
true);
K2_CHECK_EQ(ApproxEqual(src_part_contiguous1, src_part_contiguous2),
true);
}
}
TEST(OpsTest, ApproxEqualTest) {
Array2<float> array1("[ [ 1 2 3 ] [4 5 6 ]]"),
array2("[ [ 1.1 2 3 ] [4 5 6 ]]");
K2_CHECK_EQ(ApproxEqual(array1, array2, 0.2f), true);
K2_CHECK_EQ(ApproxEqual(array1, array2, 0.01f), false);
}
TEST(OpsTest, Array2Contiguous) {
Array2ContiguousTest<int32_t>();
Array2ContiguousTest<double>();
}
TEST(OpsTest, SizesToMergeMapTest) {
// simple test
for (auto &c : {GetCpuContext(), GetCudaContext()}) {
{
std::vector<int32_t> sizes = {3, 5, 1};
Array1<uint32_t> merge_map = SizesToMergeMap(c, sizes);
std::vector<uint32_t> expected_map = {0, 3, 6, 1, 4, 7, 10, 13, 2};
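      // The expected values suggest the encoding merge_map[k] = pos_in_src * num_srcs + src_idx,
      // e.g. element 2 of source 0 -> 2 * 3 + 0 = 6 and element 4 of source 1 -> 4 * 3 + 1 = 13.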
CheckArrayData(merge_map, expected_map);
}
}
{
// random case (well, I assume cpu implementation is correct and only test
// Cuda implementation)
for (int32_t i = 0; i != 2; ++i) {
int32_t num_srcs = RandInt(0, 100);
std::vector<int32_t> sizes(num_srcs);
for (int32_t n = 0; n != num_srcs; ++n) sizes[n] = RandInt(0, 1000);
Array1<uint32_t> merge_map_cpu = SizesToMergeMap(GetCpuContext(), sizes);
Array1<uint32_t> merge_map = SizesToMergeMap(GetCudaContext(), sizes);
CheckArrayData(merge_map_cpu, merge_map);
}
}
}
template <typename T>
static T ComputeSum(const T *begin, const T *end) {
T s = T(0);
for (auto p = begin; p != end; ++p) s += *p;
return s;
}
template <typename T>
static void TestSum() {
std::random_device rd;
for (auto &c : {GetCpuContext(), GetCudaContext()}) {
uint64_t seed = rd();
SetSeed(c, seed);
Array1<T> empty(c, 0);
T s = Sum(empty);
EXPECT_EQ(s, T(0));
Array1<int32_t> n(c, 1);
Rand(c, 1, 1000, 1, n.Data());
Array1<T> src(c, n[0]);
Rand<T>(c, -1000, 1000, src.Dim(), src.Data());
s = Sum(src);
Array1<T> cpu_src = src.To(GetCpuContext());
T gt = ComputeSum(cpu_src.Data(), cpu_src.Data() + cpu_src.Dim());
EXPECT_NEAR((abs(s - gt) / abs(gt + 1e-6)), 0, 1e-4);
}
}
TEST(OpsTest, Sum) {
TestSum<int32_t>();
TestSum<float>();
}
} // namespace k2
|
5e82f5410067268c2ffb008775db4a640345f27a.hip | // !!! This is a file automatically generated by hipify!!!
// includes system
#include <sstream> // ostringstream
// includes CUDA
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// includes Thrust
#ifdef __GNUC__
#include "thrust/device_ptr.h"
#include "thrust/fill.h"
#include "thrust/extrema.h"
#else
#include "thrust\device_ptr.h"
#include "thrust\fill.h"
#include "thrust\extrema.h"
#endif
// includes project
#include "int_rungekutta8.h"
#include "number_of_bodies.h"
#include "nbody_exception.h"
#include "red_macro.h"
#include "red_constants.h"
#include "util.h"
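// LAMBDA = 41/840 is the leading coefficient of the embedded error estimate of
// the 13-stage Fehlberg 7(8) pair; the error kernels below only form the raw sum
// f0 + f10 - f11 - f12, and LAMBDA (together with dt) is presumably folded in by
// get_max_error(), to which it is passed.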
#define LAMBDA 41.0/840.0
ttt_t rungekutta8::c[] = { 0.0, 2.0/27.0, 1.0/9.0, 1.0/6.0, 5.0/12.0, 1.0/2.0, 5.0/6.0, 1.0/6.0, 2.0/3.0, 1.0/3.0, 1.0, 0.0, 1.0 };
var_t rungekutta8::a[] = { 0.0,
2.0/27.0,
1.0/36.0, 1.0/12.0,
1.0/24.0, 0.0, 1.0/8.0,
5.0/12.0, 0.0, -25.0/16.0, 25.0/16.0,
1.0/20.0, 0.0, 0.0, 1.0/4.0, 1.0/5.0,
-25.0/108.0, 0.0, 0.0, 125.0/108.0, -65.0/27.0, 125.0/54.0,
31.0/300.0, 0.0, 0.0, 0.0, 61.0/225.0, -2.0/9.0, 13.0/900.0,
2.0, 0.0, 0.0, -53.0/6.0, 704.0/45.0, -107.0/9.0, 67.0/90.0, 3.0,
-91.0/108.0, 0.0, 0.0, 23.0/108.0, -976.0/135.0, 311.0/54.0, -19.0/60.0, 17.0/6.0, -1.0/12.0,
2383.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -301.0/82.0, 2133.0/4100.0, 45.0/82.0, 45.0/164.0, 18.0/41.0,
3.0/205.0, 0.0, 0.0, 0.0, 0.0, -6.0/41.0, -3.0/205.0, -3.0/41.0, 3.0/41.0, 6.0/41.0, 0.0,
-1777.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -289.0/82.0, 2193.0/4100.0, 51.0/82.0, 33.0/164.0, 12.0/41.0, 0.0, 1.0 };
var_t rungekutta8::b[] = { 41.0/840.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0 };
var_t rungekutta8::bh[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0, 41.0/840.0, 41.0/840.0 };
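// Layout note: a[] is the strictly lower-triangular Butcher tableau of the
// 13-stage Fehlberg 7(8) method flattened row by row, with a leading dummy 0.0,
// so row r starts at index 1 + r*(r-1)/2 (the idx values 1, 2, 4, 7, 11, ... used
// in calc_ytemp_for_fr below). b[] holds the weights of the propagated solution;
// bh[] appears to be the embedded companion weights (copied to constant memory
// but not used by the kernels in this file).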
ttt_t c_rungekutta8::c[] = { 0.0, 2.0/27.0, 1.0/9.0, 1.0/6.0, 5.0/12.0, 1.0/2.0, 5.0/6.0, 1.0/6.0, 2.0/3.0, 1.0/3.0, 1.0, 0.0, 1.0 };
var_t c_rungekutta8::a[] = { 0.0,
2.0/27.0,
1.0/36.0, 1.0/12.0,
1.0/24.0, 0.0, 1.0/8.0,
5.0/12.0, 0.0, -25.0/16.0, 25.0/16.0,
1.0/20.0, 0.0, 0.0, 1.0/4.0, 1.0/5.0,
-25.0/108.0, 0.0, 0.0, 125.0/108.0, -65.0/27.0, 125.0/54.0,
31.0/300.0, 0.0, 0.0, 0.0, 61.0/225.0, -2.0/9.0, 13.0/900.0,
2.0, 0.0, 0.0, -53.0/6.0, 704.0/45.0, -107.0/9.0, 67.0/90.0, 3.0,
-91.0/108.0, 0.0, 0.0, 23.0/108.0, -976.0/135.0, 311.0/54.0, -19.0/60.0, 17.0/6.0, -1.0/12.0,
2383.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -301.0/82.0, 2133.0/4100.0, 45.0/82.0, 45.0/164.0, 18.0/41.0,
3.0/205.0, 0.0, 0.0, 0.0, 0.0, -6.0/41.0, -3.0/205.0, -3.0/41.0, 3.0/41.0, 6.0/41.0, 0.0,
-1777.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -289.0/82.0, 2193.0/4100.0, 51.0/82.0, 33.0/164.0, 12.0/41.0, 0.0, 1.0 };
var_t c_rungekutta8::b[] = { 41.0/840.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0 };
var_t c_rungekutta8::bh[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0, 41.0/840.0, 41.0/840.0 };
__constant__ var_t dc_a[ sizeof(rungekutta8::a ) / sizeof(var_t)];
__constant__ var_t dc_b[ sizeof(rungekutta8::b ) / sizeof(var_t)];
__constant__ var_t dc_bh[sizeof(rungekutta8::bh) / sizeof(var_t)];
__constant__ var_t dc_c[ sizeof(rungekutta8::c ) / sizeof(ttt_t)];
namespace test_kernel
{
static __global__
void print_array(int n, const var_t* v)
{
printf("v: %p\n", v);
for (int i = 0; i < n; i++)
{
printf("v[%4d] : %24.16le\n", i, v[i]);
}
}
static __global__
void print_dc_a()
{
for (int i = 0; i < sizeof(dc_a)/sizeof(var_t); i++)
{
printf("dc_a[%2d]: %20.16lf\n", i, dc_a[i]);
}
}
static __global__
void print_dc_b()
{
for (int i = 0; i < sizeof(dc_b)/sizeof(var_t); i++)
{
printf("dc_b[%2d]: %20.16lf\n", i, dc_b[i]);
}
}
static __global__
void print_dc_bh()
{
for (int i = 0; i < sizeof(dc_bh)/sizeof(var_t); i++)
{
printf("dc_bh[%2d]: %20.16lf\n", i, dc_bh[i]);
}
}
static __global__
void print_dc_c()
{
for (int i = 0; i < sizeof(dc_c)/sizeof(ttt_t); i++)
{
printf("dc_c[%2d]: %20.16lf\n", i, dc_c[i]);
}
}
static __global__
void print_memory_address(int n, var_t* adr)
{
for (int i = 0; i < n; i++)
{
printf("adr[%2d]: %p\n", i, adr[i]);
}
}
} /* test_kernel */
namespace rk8_kernel
{
// ytemp = yn + dt*(a10*f0)
static __global__
void calc_ytemp_for_f1(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, var_t a10)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a10*f0[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a20*f0 + a21*f1)
static __global__
void calc_ytemp_for_f2(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f1, var_t a20, var_t a21)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a20*f0[tid] + a21*f1[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a30*f0 + a32*f2)
static __global__
void calc_ytemp_for_f3(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, var_t a30, var_t a32)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a30*f0[tid] + a32*f2[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a40*f0 + a42*f2 + a43*f3)
static __global__
void calc_ytemp_for_f4(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, const var_t *f3, var_t a40, var_t a42, var_t a43)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a40*f0[tid] + a42*f2[tid] + a43*f3[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a50*f0 + a53*f3 + a54*f4)
static __global__
void calc_ytemp_for_f5(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, var_t a50, var_t a53, var_t a54)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a50*f0[tid] + a53*f3[tid] + a54*f4[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a60*f0 + a63*f3 + a64*f4 + a65*f5)
static __global__
void calc_ytemp_for_f6(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, var_t a60, var_t a63, var_t a64, var_t a65)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a60*f0[tid] + a63*f3[tid] + a64*f4[tid] + a65*f5[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a70*f0 + a74*f4 + a75*f5 + a76*f6)
static __global__
void calc_ytemp_for_f7(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f4, const var_t *f5, const var_t *f6, var_t a70, var_t a74, var_t a75, var_t a76)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a70*f0[tid] + a74*f4[tid] + a75*f5[tid] + a76*f6[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a80*f0 + a83*f3 + a84*f4 + a85*f5 + a86*f6 + a87*f7)
static __global__
void calc_ytemp_for_f8(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, var_t a80, var_t a83, var_t a84, var_t a85, var_t a86, var_t a87)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a80*f0[tid] + a83*f3[tid] + a84*f4[tid] + a85*f5[tid] + a86*f6[tid] + a87*f7[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a90*f0 + a93*f3 + a94*f4 + a95*f5 + a96*f6 + a97*f7 + a98*f8)
static __global__
void calc_ytemp_for_f9(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, var_t a90, var_t a93, var_t a94, var_t a95, var_t a96, var_t a97, var_t a98)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a90*f0[tid] + a93*f3[tid] + a94*f4[tid] + a95*f5[tid] + a96*f6[tid] + a97*f7[tid] + a98*f8[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a100*f0 + a103*f3 + a104*f4 + a105*f5 + a106*f6 + a107*f7 + a108*f8 + a109*f9)
static __global__
void calc_ytemp_for_f10(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a100, var_t a103, var_t a104, var_t a105, var_t a106, var_t a107, var_t a108, var_t a109)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a100*f0[tid] + a103*f3[tid] + a104*f4[tid] + a105*f5[tid] + a106*f6[tid] + a107*f7[tid] + a108*f8[tid] + a109*f9[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a110*f0 + a115*f5 + a116*f6 + a117*f7 + a118*f8 + a119*f9)
static __global__
void calc_ytemp_for_f11(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a110, var_t a115, var_t a116, var_t a117, var_t a118, var_t a119)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a110*f0[tid] + a115*f5[tid] + a116*f6[tid] + a117*f7[tid] + a118*f8[tid] + a119*f9[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a120*f0 + a123*f3 + a124*f4 + a125*f5 + a126*f6 + a127*f7 + a128*f8 + a129*f9 + a1211*f11)
static __global__
void calc_ytemp_for_f12(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f11, var_t a120, var_t a123, var_t a124, var_t a125, var_t a126, var_t a127, var_t a128, var_t a129, var_t a1211)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a120*f0[tid] + a123*f3[tid] + a124*f4[tid] + a125*f5[tid] + a126*f6[tid] + a127*f7[tid] + a128*f8[tid] + a129*f9[tid] + a1211*f11[tid]);
tid += stride;
}
}
// err = f0 + f10 - f11 - f12
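// (This is the classical Fehlberg 7(8) error indicator: the local truncation
// error is estimated as LAMBDA * dt * (f0 + f10 - f11 - f12); LAMBDA and dt are
// presumably applied later in get_max_error.)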
static __global__
void calc_error(int_t n, var_t *err, const var_t *f0, const var_t *f10, const var_t *f11, const var_t *f12)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
err[tid] = (f0[tid] + f10[tid] - f11[tid] - f12[tid]);
tid += stride;
}
}
// y_n+1 = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10)
static __global__
void calc_y_np1(int_t n, var_t *y_np1, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f10, var_t b0, var_t b5, var_t b6, var_t b7, var_t b8, var_t b9, var_t b10)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
y_np1[tid] = y_n[tid] + dt * (b0*f0[tid] + b5*f5[tid] + b6*(f6[tid] + f7[tid]) + b8*(f8[tid] + f9[tid]) + b10*f10[tid]);
tid += stride;
}
}
} /* rk8_kernel */
// ytemp = yn + dt*(a10*f0)
void rungekutta8::cpu_calc_ytemp_for_f1(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, var_t a10)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a10*f0[tid]);
}
}
// ytemp = yn + dt*(a20*f0 + a21*f1)
void rungekutta8::cpu_calc_ytemp_for_f2(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f1, var_t a20, var_t a21)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a20*f0[tid] + a21*f1[tid]);
}
}
// ytemp = yn + dt*(a30*f0 + a32*f2)
void rungekutta8::cpu_calc_ytemp_for_f3(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, var_t a30, var_t a32)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a30*f0[tid] + a32*f2[tid]);
}
}
// ytemp = yn + dt*(a40*f0 + a42*f2 + a43*f3)
void rungekutta8::cpu_calc_ytemp_for_f4(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, const var_t *f3, var_t a40, var_t a42, var_t a43)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a40*f0[tid] + a42*f2[tid] + a43*f3[tid]);
}
}
// ytemp = yn + dt*(a50*f0 + a53*f3 + a54*f4)
void rungekutta8::cpu_calc_ytemp_for_f5(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, var_t a50, var_t a53, var_t a54)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a50*f0[tid] + a53*f3[tid] + a54*f4[tid]);
}
}
// ytemp = yn + dt*(a60*f0 + a63*f3 + a64*f4 + a65*f5)
void rungekutta8::cpu_calc_ytemp_for_f6(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, var_t a60, var_t a63, var_t a64, var_t a65)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a60*f0[tid] + a63*f3[tid] + a64*f4[tid] + a65*f5[tid]);
}
}
// ytemp = yn + dt*(a70*f0 + a74*f4 + a75*f5 + a76*f6)
void rungekutta8::cpu_calc_ytemp_for_f7(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f4, const var_t *f5, const var_t *f6, var_t a70, var_t a74, var_t a75, var_t a76)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a70*f0[tid] + a74*f4[tid] + a75*f5[tid] + a76*f6[tid]);
}
}
// ytemp = yn + dt*(a80*f0 + a83*f3 + a84*f4 + a85*f5 + a86*f6 + a87*f7)
void rungekutta8::cpu_calc_ytemp_for_f8(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, var_t a80, var_t a83, var_t a84, var_t a85, var_t a86, var_t a87)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a80*f0[tid] + a83*f3[tid] + a84*f4[tid] + a85*f5[tid] + a86*f6[tid] + a87*f7[tid]);
}
}
// ytemp = yn + dt*(a90*f0 + a93*f3 + a94*f4 + a95*f5 + a96*f6 + a97*f7 + a98*f8)
void rungekutta8::cpu_calc_ytemp_for_f9(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, var_t a90, var_t a93, var_t a94, var_t a95, var_t a96, var_t a97, var_t a98)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a90*f0[tid] + a93*f3[tid] + a94*f4[tid] + a95*f5[tid] + a96*f6[tid] + a97*f7[tid] + a98*f8[tid]);
}
}
// ytemp = yn + dt*(a100*f0 + a103*f3 + a104*f4 + a105*f5 + a106*f6 + a107*f7 + a108*f8 + a109*f9)
void rungekutta8::cpu_calc_ytemp_for_f10(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a100, var_t a103, var_t a104, var_t a105, var_t a106, var_t a107, var_t a108, var_t a109)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a100*f0[tid] + a103*f3[tid] + a104*f4[tid] + a105*f5[tid] + a106*f6[tid] + a107*f7[tid] + a108*f8[tid] + a109*f9[tid]);
}
}
// ytemp = yn + dt*(a110*f0 + a115*f5 + a116*f6 + a117*f7 + a118*f8 + a119*f9)
void rungekutta8::cpu_calc_ytemp_for_f11(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a110, var_t a115, var_t a116, var_t a117, var_t a118, var_t a119)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a110*f0[tid] + a115*f5[tid] + a116*f6[tid] + a117*f7[tid] + a118*f8[tid] + a119*f9[tid]);
}
}
// ytemp = yn + dt*(a120*f0 + a123*f3 + a124*f4 + a125*f5 + a126*f6 + a127*f7 + a128*f8 + a129*f9 + a1211*f11)
void rungekutta8::cpu_calc_ytemp_for_f12(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f11, var_t a120, var_t a123, var_t a124, var_t a125, var_t a126, var_t a127, var_t a128, var_t a129, var_t a1211)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a120*f0[tid] + a123*f3[tid] + a124*f4[tid] + a125*f5[tid] + a126*f6[tid] + a127*f7[tid] + a128*f8[tid] + a129*f9[tid] + a1211*f11[tid]);
}
}
// err = f0 + f10 - f11 - f12
void rungekutta8::cpu_calc_error(int_t n, var_t *err, const var_t *f0, const var_t *f10, const var_t *f11, const var_t *f12)
{
for (int tid = 0; tid < n; tid++)
{
err[tid] = (f0[tid] + f10[tid] - f11[tid] - f12[tid]);
}
}
// y_n+1 = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10)
void rungekutta8::cpu_calc_y_np1(int_t n, var_t *y_np1, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f10, var_t b0, var_t b5, var_t b6, var_t b7, var_t b8, var_t b9, var_t b10)
{
for (int tid = 0; tid < n; tid++)
{
y_np1[tid] = y_n[tid] + dt * (b0*f0[tid] + b5*f5[tid] + b6*(f6[tid] + f7[tid]) + b8*(f8[tid] + f9[tid]) + b10*f10[tid]);
}
}
rungekutta8::rungekutta8(pp_disk *ppd, ttt_t dt, bool adaptive, var_t tolerance, computing_device_t comp_dev) :
integrator(ppd, dt, adaptive, tolerance, (adaptive ? 13 : 11), comp_dev)
{
name = "Runge-Kutta-Fehlberg8";
short_name = "RKF8";
order = 7;
}
rungekutta8::~rungekutta8()
{
}
void rungekutta8::calc_ytemp_for_fr(int n_var, int r)
{
int idx = 0;
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t* ytmp = (var_t*)ytemp[i];
var_t* f0 = (var_t*)dydx[i][0];
var_t* f1 = (var_t*)dydx[i][1];
var_t* f2 = (var_t*)dydx[i][2];
var_t* f3 = (var_t*)dydx[i][3];
var_t* f4 = (var_t*)dydx[i][4];
var_t* f5 = (var_t*)dydx[i][5];
var_t* f6 = (var_t*)dydx[i][6];
var_t* f7 = (var_t*)dydx[i][7];
var_t* f8 = (var_t*)dydx[i][8];
var_t* f9 = (var_t*)dydx[i][9];
var_t* f10= (var_t*)dydx[i][10];
var_t* f11= adaptive ? (var_t*)dydx[i][11] : 0x0;
switch (r)
{
case 1:
idx = 1;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f1(n_var, ytmp, dt_try, y_n, f0, a[idx]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f1), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, a[idx]);
}
break;
case 2:
idx = 2;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f2(n_var, ytmp, dt_try, y_n, f0, f1, a[idx], a[idx+1]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f2), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f1, a[idx], a[idx+1]);
}
break;
case 3:
idx = 4;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f3(n_var, ytmp, dt_try, y_n, f0, f2, a[idx], a[idx+2]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f3), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f2, a[idx], a[idx+2]);
}
break;
case 4:
idx = 7;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f4(n_var, ytmp, dt_try, y_n, f0, f2, f3, a[idx], a[idx+2], a[idx+3]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f4), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f2, f3, a[idx], a[idx+2], a[idx+3]);
}
break;
case 5:
idx = 11;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f5(n_var, ytmp, dt_try, y_n, f0, f3, f4, a[idx], a[idx+3], a[idx+4]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f5), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f3, f4, a[idx], a[idx+3], a[idx+4]);
}
break;
case 6:
idx = 16;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f6(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, a[idx], a[idx+3], a[idx+4], a[idx+5]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f6), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, a[idx], a[idx+3], a[idx+4], a[idx+5]);
}
break;
case 7:
idx = 22;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f7(n_var, ytmp, dt_try, y_n, f0, f4, f5, f6, a[idx], a[idx+4], a[idx+5], a[idx+6]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f7), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f4, f5, f6, a[idx], a[idx+4], a[idx+5], a[idx+6]);
}
break;
case 8:
idx = 29;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f8(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f8), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7]);
}
break;
case 9:
idx = 37;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f9(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f9), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8]);
}
break;
case 10:
idx = 46;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f10(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f10), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
break;
case 11:
idx = 56;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f11(n_var, ytmp, dt_try, y_n, f0, f5, f6, f7, f8, f9, a[idx], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f11), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f5, f6, f7, f8, f9, a[idx], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
break;
case 12:
idx = 67;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f12(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, f11, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9], a[idx+11]);
}
else
{
hipLaunchKernelGGL(( rk8_kernel::calc_ytemp_for_f12), dim3(grid), dim3(block), 0, 0, n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, f11, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9], a[idx+11]);
}
break;
default:
throw string("rungekutta8::calc_ytemp_for_fr: parameter out of range.");
} /* switch */
if (COMPUTING_DEVICE_GPU == comp_dev)
{
hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError());
if (hipSuccess != cudaStatus)
{
const string err_msg = "rk8_kernel::calc_ytemp_for_f";
ostringstream convert; // stream used for the conversion
convert << r;
throw err_msg + convert.str() + " failed";
}
}
} /* for */
}
void rungekutta8::calc_error(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t *f0 = (var_t*)dydx[i][0];
var_t *f10 = (var_t*)dydx[i][10];
var_t *f11 = (var_t*)dydx[i][11];
var_t *f12 = (var_t*)dydx[i][12];
if (COMPUTING_DEVICE_GPU == comp_dev)
{
hipLaunchKernelGGL(( rk8_kernel::calc_error), dim3(grid), dim3(block), 0, 0, n_var, err[i], f0, f10, f11, f12);
hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError());
if (hipSuccess != cudaStatus)
{
throw string("rk8_kernel::calc_error failed");
}
}
else
{
cpu_calc_error(n_var, err[i], f0, f10, f11, f12);
}
}
}
void rungekutta8::calc_y_np1(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t* y_np1 = (var_t*)ppd->sim_data->yout[i];
var_t *f0 = (var_t*)dydx[i][0];
var_t *f5 = (var_t*)dydx[i][5];
var_t *f6 = (var_t*)dydx[i][6];
var_t *f7 = (var_t*)dydx[i][7];
var_t *f8 = (var_t*)dydx[i][8];
var_t *f9 = (var_t*)dydx[i][9];
var_t *f10 = (var_t*)dydx[i][10];
if (COMPUTING_DEVICE_GPU == comp_dev)
{
hipLaunchKernelGGL(( rk8_kernel::calc_y_np1), dim3(grid), dim3(block), 0, 0, n_var, y_np1, dt_try, y_n, f0, f5, f6, f7, f8, f9, f10, b[0], b[5], b[6], b[7], b[8], b[9], b[10]);
hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError());
if (hipSuccess != cudaStatus)
{
throw string("rk8_kernel::calc_y_np1 failed");
}
}
else
{
cpu_calc_y_np1(n_var, y_np1, dt_try, y_n, f0, f5, f6, f7, f8, f9, f10, b[0], b[5], b[6], b[7], b[8], b[9], b[10]);
}
}
}
ttt_t rungekutta8::step()
{
const int n_body_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total();
const int n_var_total = NDIM * n_body_total;
if (COMPUTING_DEVICE_GPU == comp_dev)
{
// Set the kernel launch parameters
calc_grid(n_var_total, THREADS_PER_BLOCK);
}
// Calculate initial differentials and store them into dydx[][0]
int r = 0;
ttt_t ttemp = ppd->t + c[r] * dt_try;
 // Calculate f0 = f(tn, yn) = dydx[][0]
const vec_t *coor = ppd->sim_data->y[0];
const vec_t *velo = ppd->sim_data->y[1];
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, coor, velo, dydx[i][r]);
}
var_t max_err = 0.0;
int iter = 0;
do
{
if (0 < ppd->get_n_event())
{
ppd->clear_event_counter();
}
dt_did = dt_try;
  // Calculate f1 = f(tn + c1 * dt, yn + a10 * dt * f0) = dydx[][1]
  // ...
  // Calculate f10 = f(tn + c10 * dt, yn + a10,0 * dt * f0 + ...) = dydx[][10]
for (r = 1; r <= 10; r++)
{
ttemp = ppd->t + c[r] * dt_try;
calc_ytemp_for_fr(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
// y_(n+1) = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10) + O(dt^8)
calc_y_np1(n_var_total);
if (adaptive)
{
   // Calculate f11 = f(tn + c11 * dt, yn + ...) = dydx[][11]
   // Calculate f12 = f(tn + c12 * dt, yn + ...) = dydx[][12]
for (r = 11; r < r_max; r++)
{
ttemp = ppd->t + c[r] * dt_try;
calc_ytemp_for_fr(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
int n_var = 0;
if (ppd->get_ups())
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_prime_massive());
}
else
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_total() : ppd->n_bodies->get_n_massive());
}
calc_error(n_var);
max_err = get_max_error(n_var, LAMBDA);
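   // Standard step-size controller: shrink or grow dt with a 0.9 safety factor
   // and exponent 1/(order+1), where order = 7 for this method.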
dt_try *= 0.9 * pow(tolerance / max_err, 1.0/(order+1));
//if (ppd->get_n_event() > 0)
//{
// if (dt_try < dt_did)
// {
// dt_try = dt_did;
// }
// break;
//}
}
iter++;
} while (adaptive && max_err > tolerance);
update_counters(iter);
ppd->t += dt_did;
ppd->swap();
return dt_did;
}
namespace c_rk8_kernel
{
static __global__
void calc_ytemp(int n, int r, int idx, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *ytemp)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
{
ytemp[tid] = y_n[tid];
for (int i = 0; i < r; i++)
{
if (0.0 == dc_a[idx + i])
{
continue;
}
ytemp[tid] += dt * dc_a[idx + i] * dydt[offset + i][tid];
    //TODO: the dt factor could be applied once, after the loop
}
}
}
static __global__
void calc_y_np1(int n, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *y_np1)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
{
y_np1[tid] = y_n[tid];
for (int i = 0; i < 11; i++)
{
if (0.0 == dc_b[i])
{
continue;
}
y_np1[tid] += dt * dc_b[i] * dydt[offset + i][tid];
}
}
}
static __global__
void calc_error(int n, const var_t *f0, const var_t *f10, const var_t *f11, const var_t *f12, var_t *err)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
{
err[tid] = (f0[tid] + f10[tid] - f11[tid] - f12[tid]);
}
}
} /* c_rk8_kernel */
c_rungekutta8::c_rungekutta8(pp_disk *ppd, ttt_t dt, bool adaptive, var_t tolerance, computing_device_t comp_dev) :
integrator(ppd, dt, adaptive, tolerance, (adaptive ? 13 : 11), comp_dev)
{
name = "c_Runge-Kutta-Fehlberg8";
short_name = "cRKF8";
order = 7;
if (COMPUTING_DEVICE_GPU == comp_dev)
{
redutilcu::copy_constant_to_device(dc_a, a, sizeof(a));
redutilcu::copy_constant_to_device(dc_b, b, sizeof(b));
redutilcu::copy_constant_to_device(dc_bh, bh, sizeof(bh));
redutilcu::copy_constant_to_device(dc_c, c, sizeof(c));
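  // (copy_constant_to_device is assumed to wrap hipMemcpyToSymbol; it places the
  // coefficient tables in __constant__ memory for the c_rk8_kernel functions above.)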
}
}
c_rungekutta8::~c_rungekutta8()
{
}
void c_rungekutta8::call_kernel_calc_ytemp(int n_var, int r)
{
static int idx_array[] = {0, 1, 2, 4, 7, 11, 16, 22, 29, 37, 46, 56, 67};
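 // For r >= 1, idx_array[r] = 1 + r*(r-1)/2 is the offset of row r of the
 // flattened Butcher tableau in dc_a (index 0 of a[] is a dummy leading 0.0).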
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t** dydt = (var_t**)d_dydt;
var_t* ytmp = (var_t*)ytemp[i];
hipLaunchKernelGGL(( c_rk8_kernel::calc_ytemp), dim3(grid), dim3(block), 0, 0, n_var, r, idx_array[r], i*r_max, dt_try, y_n, dydt, ytmp);
hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError());
if (hipSuccess != cudaStatus)
{
throw string("c_rk8_kernel::calc_ytemp failed");
}
}
}
void c_rungekutta8::call_kernel_calc_error(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t* f0 = (var_t*)dydx[i][0];
var_t* f10 = (var_t*)dydx[i][10];
var_t* f11 = (var_t*)dydx[i][11];
var_t* f12 = (var_t*)dydx[i][12];
hipLaunchKernelGGL(( c_rk8_kernel::calc_error), dim3(grid), dim3(block), 0, 0, n_var, f0, f10, f11, f12, err[i]);
hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError());
if (hipSuccess != cudaStatus)
{
throw string("c_rk8_kernel::calc_error failed");
}
}
}
void c_rungekutta8::call_kernel_calc_y_np1(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t** dydt = (var_t**)d_dydt;
var_t* y_np1 = (var_t*)ppd->sim_data->yout[i];
hipLaunchKernelGGL(( c_rk8_kernel::calc_y_np1), dim3(grid), dim3(block), 0, 0, n_var, i*r_max, dt_try, y_n, dydt, y_np1);
hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError());
if (hipSuccess != cudaStatus)
{
throw string("c_rk8_kernel::calc_y_np1_kernel failed");
}
}
}
ttt_t c_rungekutta8::step()
{
const int n_body_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total();
const int n_var_total = NDIM * n_body_total;
if (COMPUTING_DEVICE_GPU == comp_dev)
{
// Set the kernel launch parameters
calc_grid(n_var_total, THREADS_PER_BLOCK);
}
// Calculate initial differentials and store them into dydx[][0]
int r = 0;
ttt_t ttemp = ppd->t + c[r] * dt_try;
 // Calculate f0 = f(tn, yn) = dydx[][0]
const vec_t *coor = ppd->sim_data->y[0];
const vec_t *velo = ppd->sim_data->y[1];
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, coor, velo, dydx[i][r]);
}
var_t max_err = 0.0;
int iter = 0;
do
{
dt_did = dt_try;
  // Calculate f1 = f(tn + c1 * dt, yn + a10 * dt * f0) = dydx[][1]
  // ...
  // Calculate f10 = f(tn + c10 * dt, yn + a10,0 * dt * f0 + ...) = dydx[][10]
for (r = 1; r <= 10; r++)
{
ttemp = ppd->t + c[r] * dt_try;
call_kernel_calc_ytemp(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
// y_(n+1) = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10) + O(dt^8)
call_kernel_calc_y_np1(n_var_total);
if (adaptive)
{
   // Calculate f11 = f(tn + c11 * dt, yn + ...) = dydx[][11]
   // Calculate f12 = f(tn + c12 * dt, yn + ...) = dydx[][12]
for (r = 11; r < r_max; r++)
{
ttemp = ppd->t + c[r] * dt_try;
call_kernel_calc_ytemp(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
int n_var = 0;
if (ppd->get_ups())
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_prime_massive());
}
else
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_total() : ppd->n_bodies->get_n_massive());
}
call_kernel_calc_error(n_var);
max_err = get_max_error(n_var, LAMBDA);
dt_try *= 0.9 * pow(tolerance / max_err, 1.0/(order+1));
if (ppd->get_n_event() > 0)
{
if (dt_try < dt_did)
{
dt_try = dt_did;
}
break;
}
}
iter++;
} while (adaptive && max_err > tolerance);
update_counters(iter);
ppd->t += dt_did;
ppd->swap();
return dt_did;
}
#undef LAMBDA
| 5e82f5410067268c2ffb008775db4a640345f27a.cu | // includes system
#include <sstream> // ostringstream
// includes CUDA
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// includes Thrust
#ifdef __GNUC__
#include "thrust/device_ptr.h"
#include "thrust/fill.h"
#include "thrust/extrema.h"
#else
#include "thrust\device_ptr.h"
#include "thrust\fill.h"
#include "thrust\extrema.h"
#endif
// includes project
#include "int_rungekutta8.h"
#include "number_of_bodies.h"
#include "nbody_exception.h"
#include "red_macro.h"
#include "red_constants.h"
#include "util.h"
#define LAMBDA 41.0/840.0
ttt_t rungekutta8::c[] = { 0.0, 2.0/27.0, 1.0/9.0, 1.0/6.0, 5.0/12.0, 1.0/2.0, 5.0/6.0, 1.0/6.0, 2.0/3.0, 1.0/3.0, 1.0, 0.0, 1.0 };
var_t rungekutta8::a[] = { 0.0,
2.0/27.0,
1.0/36.0, 1.0/12.0,
1.0/24.0, 0.0, 1.0/8.0,
5.0/12.0, 0.0, -25.0/16.0, 25.0/16.0,
1.0/20.0, 0.0, 0.0, 1.0/4.0, 1.0/5.0,
-25.0/108.0, 0.0, 0.0, 125.0/108.0, -65.0/27.0, 125.0/54.0,
31.0/300.0, 0.0, 0.0, 0.0, 61.0/225.0, -2.0/9.0, 13.0/900.0,
2.0, 0.0, 0.0, -53.0/6.0, 704.0/45.0, -107.0/9.0, 67.0/90.0, 3.0,
-91.0/108.0, 0.0, 0.0, 23.0/108.0, -976.0/135.0, 311.0/54.0, -19.0/60.0, 17.0/6.0, -1.0/12.0,
2383.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -301.0/82.0, 2133.0/4100.0, 45.0/82.0, 45.0/164.0, 18.0/41.0,
3.0/205.0, 0.0, 0.0, 0.0, 0.0, -6.0/41.0, -3.0/205.0, -3.0/41.0, 3.0/41.0, 6.0/41.0, 0.0,
-1777.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -289.0/82.0, 2193.0/4100.0, 51.0/82.0, 33.0/164.0, 12.0/41.0, 0.0, 1.0 };
var_t rungekutta8::b[] = { 41.0/840.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0 };
var_t rungekutta8::bh[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0, 41.0/840.0, 41.0/840.0 };
ttt_t c_rungekutta8::c[] = { 0.0, 2.0/27.0, 1.0/9.0, 1.0/6.0, 5.0/12.0, 1.0/2.0, 5.0/6.0, 1.0/6.0, 2.0/3.0, 1.0/3.0, 1.0, 0.0, 1.0 };
var_t c_rungekutta8::a[] = { 0.0,
2.0/27.0,
1.0/36.0, 1.0/12.0,
1.0/24.0, 0.0, 1.0/8.0,
5.0/12.0, 0.0, -25.0/16.0, 25.0/16.0,
1.0/20.0, 0.0, 0.0, 1.0/4.0, 1.0/5.0,
-25.0/108.0, 0.0, 0.0, 125.0/108.0, -65.0/27.0, 125.0/54.0,
31.0/300.0, 0.0, 0.0, 0.0, 61.0/225.0, -2.0/9.0, 13.0/900.0,
2.0, 0.0, 0.0, -53.0/6.0, 704.0/45.0, -107.0/9.0, 67.0/90.0, 3.0,
-91.0/108.0, 0.0, 0.0, 23.0/108.0, -976.0/135.0, 311.0/54.0, -19.0/60.0, 17.0/6.0, -1.0/12.0,
2383.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -301.0/82.0, 2133.0/4100.0, 45.0/82.0, 45.0/164.0, 18.0/41.0,
3.0/205.0, 0.0, 0.0, 0.0, 0.0, -6.0/41.0, -3.0/205.0, -3.0/41.0, 3.0/41.0, 6.0/41.0, 0.0,
-1777.0/4100.0, 0.0, 0.0, -341.0/164.0, 4496.0/1025.0, -289.0/82.0, 2193.0/4100.0, 51.0/82.0, 33.0/164.0, 12.0/41.0, 0.0, 1.0 };
var_t c_rungekutta8::b[] = { 41.0/840.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0 };
var_t c_rungekutta8::bh[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 34.0/105.0, 9.0/35.0, 9.0/35.0, 9.0/280.0, 9.0/280.0, 41.0/840.0, 41.0/840.0, 41.0/840.0 };
__constant__ var_t dc_a[ sizeof(rungekutta8::a ) / sizeof(var_t)];
__constant__ var_t dc_b[ sizeof(rungekutta8::b ) / sizeof(var_t)];
__constant__ var_t dc_bh[sizeof(rungekutta8::bh) / sizeof(var_t)];
__constant__ var_t dc_c[ sizeof(rungekutta8::c ) / sizeof(ttt_t)];
namespace test_kernel
{
static __global__
void print_array(int n, const var_t* v)
{
printf("v: %p\n", v);
for (int i = 0; i < n; i++)
{
printf("v[%4d] : %24.16le\n", i, v[i]);
}
}
static __global__
void print_dc_a()
{
for (int i = 0; i < sizeof(dc_a)/sizeof(var_t); i++)
{
printf("dc_a[%2d]: %20.16lf\n", i, dc_a[i]);
}
}
static __global__
void print_dc_b()
{
for (int i = 0; i < sizeof(dc_b)/sizeof(var_t); i++)
{
printf("dc_b[%2d]: %20.16lf\n", i, dc_b[i]);
}
}
static __global__
void print_dc_bh()
{
for (int i = 0; i < sizeof(dc_bh)/sizeof(var_t); i++)
{
printf("dc_bh[%2d]: %20.16lf\n", i, dc_bh[i]);
}
}
static __global__
void print_dc_c()
{
for (int i = 0; i < sizeof(dc_c)/sizeof(ttt_t); i++)
{
printf("dc_c[%2d]: %20.16lf\n", i, dc_c[i]);
}
}
static __global__
void print_memory_address(int n, var_t* adr)
{
for (int i = 0; i < n; i++)
{
printf("adr[%2d]: %p\n", i, adr[i]);
}
}
} /* test_kernel */
namespace rk8_kernel
{
// ytemp = yn + dt*(a10*f0)
static __global__
void calc_ytemp_for_f1(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, var_t a10)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a10*f0[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a20*f0 + a21*f1)
static __global__
void calc_ytemp_for_f2(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f1, var_t a20, var_t a21)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a20*f0[tid] + a21*f1[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a30*f0 + a32*f2)
static __global__
void calc_ytemp_for_f3(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, var_t a30, var_t a32)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a30*f0[tid] + a32*f2[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a40*f0 + a42*f2 + a43*f3)
static __global__
void calc_ytemp_for_f4(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, const var_t *f3, var_t a40, var_t a42, var_t a43)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a40*f0[tid] + a42*f2[tid] + a43*f3[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a50*f0 + a53*f3 + a54*f4)
static __global__
void calc_ytemp_for_f5(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, var_t a50, var_t a53, var_t a54)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a50*f0[tid] + a53*f3[tid] + a54*f4[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a60*f0 + a63*f3 + a64*f4 + a65*f5)
static __global__
void calc_ytemp_for_f6(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, var_t a60, var_t a63, var_t a64, var_t a65)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a60*f0[tid] + a63*f3[tid] + a64*f4[tid] + a65*f5[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a70*f0 + a74*f4 + a75*f5 + a76*f6)
static __global__
void calc_ytemp_for_f7(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f4, const var_t *f5, const var_t *f6, var_t a70, var_t a74, var_t a75, var_t a76)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a70*f0[tid] + a74*f4[tid] + a75*f5[tid] + a76*f6[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a80*f0 + a83*f3 + a84*f4 + a85*f5 + a86*f6 + a87*f7)
static __global__
void calc_ytemp_for_f8(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, var_t a80, var_t a83, var_t a84, var_t a85, var_t a86, var_t a87)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a80*f0[tid] + a83*f3[tid] + a84*f4[tid] + a85*f5[tid] + a86*f6[tid] + a87*f7[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a90*f0 + a93*f3 + a94*f4 + a95*f5 + a96*f6 + a97*f7 + a98*f8)
static __global__
void calc_ytemp_for_f9(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, var_t a90, var_t a93, var_t a94, var_t a95, var_t a96, var_t a97, var_t a98)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a90*f0[tid] + a93*f3[tid] + a94*f4[tid] + a95*f5[tid] + a96*f6[tid] + a97*f7[tid] + a98*f8[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a100*f0 + a103*f3 + a104*f4 + a105*f5 + a106*f6 + a107*f7 + a108*f8 + a109*f9)
static __global__
void calc_ytemp_for_f10(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a100, var_t a103, var_t a104, var_t a105, var_t a106, var_t a107, var_t a108, var_t a109)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a100*f0[tid] + a103*f3[tid] + a104*f4[tid] + a105*f5[tid] + a106*f6[tid] + a107*f7[tid] + a108*f8[tid] + a109*f9[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a110*f0 + a115*f5 + a116*f6 + a117*f7 + a118*f8 + a119*f9)
static __global__
void calc_ytemp_for_f11(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a110, var_t a115, var_t a116, var_t a117, var_t a118, var_t a119)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a110*f0[tid] + a115*f5[tid] + a116*f6[tid] + a117*f7[tid] + a118*f8[tid] + a119*f9[tid]);
tid += stride;
}
}
// ytemp = yn + dt*(a120*f0 + a123*f3 + a124*f4 + a125*f5 + a126*f6 + a127*f7 + a128*f8 + a129*f9 + a1211*f11)
static __global__
void calc_ytemp_for_f12(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f11, var_t a120, var_t a123, var_t a124, var_t a125, var_t a126, var_t a127, var_t a128, var_t a129, var_t a1211)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
ytemp[tid] = y_n[tid] + dt * (a120*f0[tid] + a123*f3[tid] + a124*f4[tid] + a125*f5[tid] + a126*f6[tid] + a127*f7[tid] + a128*f8[tid] + a129*f9[tid] + a1211*f11[tid]);
tid += stride;
}
}
// err = f0 + f10 - f11 - f12
static __global__
void calc_error(int_t n, var_t *err, const var_t *f0, const var_t *f10, const var_t *f11, const var_t *f12)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
err[tid] = (f0[tid] + f10[tid] - f11[tid] - f12[tid]);
tid += stride;
}
}
// y_n+1 = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10)
static __global__
void calc_y_np1(int_t n, var_t *y_np1, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f10, var_t b0, var_t b5, var_t b6, var_t b7, var_t b8, var_t b9, var_t b10)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
while (n > tid)
{
y_np1[tid] = y_n[tid] + dt * (b0*f0[tid] + b5*f5[tid] + b6*(f6[tid] + f7[tid]) + b8*(f8[tid] + f9[tid]) + b10*f10[tid]);
tid += stride;
}
}
} /* rk8_kernel */
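/*
 * Illustrative launch sketch (not part of the original integrator): the kernels above use a
 * grid-stride loop, so any launch configuration covers all n elements, e.g.
 *
 *     dim3 block(256);
 *     dim3 grid((n_var + block.x - 1) / block.x);
 *     rk8_kernel::calc_ytemp_for_f1<<<grid, block>>>(n_var, ytemp, dt, y_n, f0, a10);
 *
 * In this file the actual grid and block values come from calc_grid() called in step().
 */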
// ytemp = yn + dt*(a10*f0)
void rungekutta8::cpu_calc_ytemp_for_f1(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, var_t a10)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a10*f0[tid]);
}
}
// ytemp = yn + dt*(a20*f0 + a21*f1)
void rungekutta8::cpu_calc_ytemp_for_f2(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f1, var_t a20, var_t a21)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a20*f0[tid] + a21*f1[tid]);
}
}
// ytemp = yn + dt*(a30*f0 + a32*f2)
void rungekutta8::cpu_calc_ytemp_for_f3(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, var_t a30, var_t a32)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a30*f0[tid] + a32*f2[tid]);
}
}
// ytemp = yn + dt*(a40*f0 + a42*f2 + a43*f3)
void rungekutta8::cpu_calc_ytemp_for_f4(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f2, const var_t *f3, var_t a40, var_t a42, var_t a43)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a40*f0[tid] + a42*f2[tid] + a43*f3[tid]);
}
}
// ytemp = yn + dt*(a50*f0 + a53*f3 + a54*f4)
void rungekutta8::cpu_calc_ytemp_for_f5(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, var_t a50, var_t a53, var_t a54)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a50*f0[tid] + a53*f3[tid] + a54*f4[tid]);
}
}
// ytemp = yn + dt*(a60*f0 + a63*f3 + a64*f4 + a65*f5)
void rungekutta8::cpu_calc_ytemp_for_f6(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, var_t a60, var_t a63, var_t a64, var_t a65)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a60*f0[tid] + a63*f3[tid] + a64*f4[tid] + a65*f5[tid]);
}
}
// ytemp = yn + dt*(a70*f0 + a74*f4 + a75*f5 + a76*f6)
void rungekutta8::cpu_calc_ytemp_for_f7(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f4, const var_t *f5, const var_t *f6, var_t a70, var_t a74, var_t a75, var_t a76)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a70*f0[tid] + a74*f4[tid] + a75*f5[tid] + a76*f6[tid]);
}
}
// ytemp = yn + dt*(a80*f0 + a83*f3 + a84*f4 + a85*f5 + a86*f6 + a87*f7)
void rungekutta8::cpu_calc_ytemp_for_f8(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, var_t a80, var_t a83, var_t a84, var_t a85, var_t a86, var_t a87)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a80*f0[tid] + a83*f3[tid] + a84*f4[tid] + a85*f5[tid] + a86*f6[tid] + a87*f7[tid]);
}
}
// ytemp = yn + dt*(a90*f0 + a93*f3 + a94*f4 + a95*f5 + a96*f6 + a97*f7 + a98*f8)
void rungekutta8::cpu_calc_ytemp_for_f9(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, var_t a90, var_t a93, var_t a94, var_t a95, var_t a96, var_t a97, var_t a98)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a90*f0[tid] + a93*f3[tid] + a94*f4[tid] + a95*f5[tid] + a96*f6[tid] + a97*f7[tid] + a98*f8[tid]);
}
}
// ytemp = yn + dt*(a100*f0 + a103*f3 + a104*f4 + a105*f5 + a106*f6 + a107*f7 + a108*f8 + a109*f9)
void rungekutta8::cpu_calc_ytemp_for_f10(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a100, var_t a103, var_t a104, var_t a105, var_t a106, var_t a107, var_t a108, var_t a109)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a100*f0[tid] + a103*f3[tid] + a104*f4[tid] + a105*f5[tid] + a106*f6[tid] + a107*f7[tid] + a108*f8[tid] + a109*f9[tid]);
}
}
// ytemp = yn + dt*(a110*f0 + a115*f5 + a116*f6 + a117*f7 + a118*f8 + a119*f9)
void rungekutta8::cpu_calc_ytemp_for_f11(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, var_t a110, var_t a115, var_t a116, var_t a117, var_t a118, var_t a119)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a110*f0[tid] + a115*f5[tid] + a116*f6[tid] + a117*f7[tid] + a118*f8[tid] + a119*f9[tid]);
}
}
// ytemp = yn + dt*(a120*f0 + a123*f3 + a124*f4 + a125*f5 + a126*f6 + a127*f7 + a128*f8 + a129*f9 + a1211*f11)
void rungekutta8::cpu_calc_ytemp_for_f12(int_t n, var_t *ytemp, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f11, var_t a120, var_t a123, var_t a124, var_t a125, var_t a126, var_t a127, var_t a128, var_t a129, var_t a1211)
{
for (int tid = 0; tid < n; tid++)
{
ytemp[tid] = y_n[tid] + dt * (a120*f0[tid] + a123*f3[tid] + a124*f4[tid] + a125*f5[tid] + a126*f6[tid] + a127*f7[tid] + a128*f8[tid] + a129*f9[tid] + a1211*f11[tid]);
}
}
// err = f0 + f10 - f11 - f12
void rungekutta8::cpu_calc_error(int_t n, var_t *err, const var_t *f0, const var_t *f10, const var_t *f11, const var_t *f12)
{
for (int tid = 0; tid < n; tid++)
{
err[tid] = (f0[tid] + f10[tid] - f11[tid] - f12[tid]);
}
}
// y_n+1 = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10)
void rungekutta8::cpu_calc_y_np1(int_t n, var_t *y_np1, ttt_t dt, const var_t *y_n, const var_t *f0, const var_t *f5, const var_t *f6, const var_t *f7, const var_t *f8, const var_t *f9, const var_t *f10, var_t b0, var_t b5, var_t b6, var_t b7, var_t b8, var_t b9, var_t b10)
{
for (int tid = 0; tid < n; tid++)
{
y_np1[tid] = y_n[tid] + dt * (b0*f0[tid] + b5*f5[tid] + b6*(f6[tid] + f7[tid]) + b8*(f8[tid] + f9[tid]) + b10*f10[tid]);
}
}
rungekutta8::rungekutta8(pp_disk *ppd, ttt_t dt, bool adaptive, var_t tolerance, computing_device_t comp_dev) :
integrator(ppd, dt, adaptive, tolerance, (adaptive ? 13 : 11), comp_dev)
{
name = "Runge-Kutta-Fehlberg8";
short_name = "RKF8";
order = 7;
}
rungekutta8::~rungekutta8()
{
}
void rungekutta8::calc_ytemp_for_fr(int n_var, int r)
{
int idx = 0;
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t* ytmp = (var_t*)ytemp[i];
var_t* f0 = (var_t*)dydx[i][0];
var_t* f1 = (var_t*)dydx[i][1];
var_t* f2 = (var_t*)dydx[i][2];
var_t* f3 = (var_t*)dydx[i][3];
var_t* f4 = (var_t*)dydx[i][4];
var_t* f5 = (var_t*)dydx[i][5];
var_t* f6 = (var_t*)dydx[i][6];
var_t* f7 = (var_t*)dydx[i][7];
var_t* f8 = (var_t*)dydx[i][8];
var_t* f9 = (var_t*)dydx[i][9];
var_t* f10= (var_t*)dydx[i][10];
var_t* f11= adaptive ? (var_t*)dydx[i][11] : 0x0;
switch (r)
{
case 1:
idx = 1;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f1(n_var, ytmp, dt_try, y_n, f0, a[idx]);
}
else
{
rk8_kernel::calc_ytemp_for_f1<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, a[idx]);
}
break;
case 2:
idx = 2;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f2(n_var, ytmp, dt_try, y_n, f0, f1, a[idx], a[idx+1]);
}
else
{
rk8_kernel::calc_ytemp_for_f2<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f1, a[idx], a[idx+1]);
}
break;
case 3:
idx = 4;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f3(n_var, ytmp, dt_try, y_n, f0, f2, a[idx], a[idx+2]);
}
else
{
rk8_kernel::calc_ytemp_for_f3<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f2, a[idx], a[idx+2]);
}
break;
case 4:
idx = 7;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f4(n_var, ytmp, dt_try, y_n, f0, f2, f3, a[idx], a[idx+2], a[idx+3]);
}
else
{
rk8_kernel::calc_ytemp_for_f4<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f2, f3, a[idx], a[idx+2], a[idx+3]);
}
break;
case 5:
idx = 11;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f5(n_var, ytmp, dt_try, y_n, f0, f3, f4, a[idx], a[idx+3], a[idx+4]);
}
else
{
rk8_kernel::calc_ytemp_for_f5<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f3, f4, a[idx], a[idx+3], a[idx+4]);
}
break;
case 6:
idx = 16;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f6(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, a[idx], a[idx+3], a[idx+4], a[idx+5]);
}
else
{
rk8_kernel::calc_ytemp_for_f6<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, a[idx], a[idx+3], a[idx+4], a[idx+5]);
}
break;
case 7:
idx = 22;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f7(n_var, ytmp, dt_try, y_n, f0, f4, f5, f6, a[idx], a[idx+4], a[idx+5], a[idx+6]);
}
else
{
rk8_kernel::calc_ytemp_for_f7<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f4, f5, f6, a[idx], a[idx+4], a[idx+5], a[idx+6]);
}
break;
case 8:
idx = 29;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f8(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7]);
}
else
{
rk8_kernel::calc_ytemp_for_f8<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7]);
}
break;
case 9:
idx = 37;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f9(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8]);
}
else
{
rk8_kernel::calc_ytemp_for_f9<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8]);
}
break;
case 10:
idx = 46;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f10(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
else
{
rk8_kernel::calc_ytemp_for_f10<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
break;
case 11:
idx = 56;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f11(n_var, ytmp, dt_try, y_n, f0, f5, f6, f7, f8, f9, a[idx], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
else
{
rk8_kernel::calc_ytemp_for_f11<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f5, f6, f7, f8, f9, a[idx], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9]);
}
break;
case 12:
idx = 67;
if (COMPUTING_DEVICE_CPU == comp_dev)
{
cpu_calc_ytemp_for_f12(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, f11, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9], a[idx+11]);
}
else
{
rk8_kernel::calc_ytemp_for_f12<<<grid, block>>>(n_var, ytmp, dt_try, y_n, f0, f3, f4, f5, f6, f7, f8, f9, f11, a[idx], a[idx+3], a[idx+4], a[idx+5], a[idx+6], a[idx+7], a[idx+8], a[idx+9], a[idx+11]);
}
break;
default:
throw string("rungekutta8::calc_ytemp_for_fr: parameter out of range.");
} /* switch */
if (COMPUTING_DEVICE_GPU == comp_dev)
{
cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaSuccess != cudaStatus)
{
const string err_msg = "rk8_kernel::calc_ytemp_for_f";
ostringstream convert; // stream used for the conversion
convert << r;
throw err_msg + convert.str() + " failed";
}
}
} /* for */
}
void rungekutta8::calc_error(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t *f0 = (var_t*)dydx[i][0];
var_t *f10 = (var_t*)dydx[i][10];
var_t *f11 = (var_t*)dydx[i][11];
var_t *f12 = (var_t*)dydx[i][12];
if (COMPUTING_DEVICE_GPU == comp_dev)
{
rk8_kernel::calc_error<<<grid, block>>>(n_var, err[i], f0, f10, f11, f12);
cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaSuccess != cudaStatus)
{
throw string("rk8_kernel::calc_error failed");
}
}
else
{
cpu_calc_error(n_var, err[i], f0, f10, f11, f12);
}
}
}
void rungekutta8::calc_y_np1(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t* y_np1 = (var_t*)ppd->sim_data->yout[i];
var_t *f0 = (var_t*)dydx[i][0];
var_t *f5 = (var_t*)dydx[i][5];
var_t *f6 = (var_t*)dydx[i][6];
var_t *f7 = (var_t*)dydx[i][7];
var_t *f8 = (var_t*)dydx[i][8];
var_t *f9 = (var_t*)dydx[i][9];
var_t *f10 = (var_t*)dydx[i][10];
if (COMPUTING_DEVICE_GPU == comp_dev)
{
rk8_kernel::calc_y_np1<<<grid, block>>>(n_var, y_np1, dt_try, y_n, f0, f5, f6, f7, f8, f9, f10, b[0], b[5], b[6], b[7], b[8], b[9], b[10]);
cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaSuccess != cudaStatus)
{
throw string("rk8_kernel::calc_y_np1 failed");
}
}
else
{
cpu_calc_y_np1(n_var, y_np1, dt_try, y_n, f0, f5, f6, f7, f8, f9, f10, b[0], b[5], b[6], b[7], b[8], b[9], b[10]);
}
}
}
ttt_t rungekutta8::step()
{
const int n_body_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total();
const int n_var_total = NDIM * n_body_total;
if (COMPUTING_DEVICE_GPU == comp_dev)
{
// Set the kernel launch parameters
calc_grid(n_var_total, THREADS_PER_BLOCK);
}
// Calculate initial differentials and store them into dydx[][0]
int r = 0;
ttt_t ttemp = ppd->t + c[r] * dt_try;
	// Calculate f0 = f(tn, yn) and store it in dydx[][0]
const vec_t *coor = ppd->sim_data->y[0];
const vec_t *velo = ppd->sim_data->y[1];
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, coor, velo, dydx[i][r]);
}
var_t max_err = 0.0;
int iter = 0;
do
{
if (0 < ppd->get_n_event())
{
ppd->clear_event_counter();
}
dt_did = dt_try;
		// Calculate f1 = f(tn + c1 * dt, yn + a10 * dt * f0) = dydx[][1]
		// ...
		// Calculate f10 = f(tn + c10 * dt, yn + a10,0 * dt * f0 + ...) = dydx[][10]
for (r = 1; r <= 10; r++)
{
ttemp = ppd->t + c[r] * dt_try;
calc_ytemp_for_fr(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
// y_(n+1) = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10) + O(dt^8)
calc_y_np1(n_var_total);
if (adaptive)
{
			// Calculate f11 = f(tn + c11 * dt, yn + ...) = dydx[][11]
			// Calculate f12 = f(tn + c12 * dt, yn + ...) = dydx[][12]
for (r = 11; r < r_max; r++)
{
ttemp = ppd->t + c[r] * dt_try;
calc_ytemp_for_fr(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
int n_var = 0;
if (ppd->get_ups())
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_prime_massive());
}
else
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_total() : ppd->n_bodies->get_n_massive());
}
calc_error(n_var);
max_err = get_max_error(n_var, LAMBDA);
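			// Standard step-size controller: scale the trial step by (tolerance/max_err)^(1/(order+1)),
			// damped by the 0.9 safety factor, before either retrying the step or accepting it.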
dt_try *= 0.9 * pow(tolerance / max_err, 1.0/(order+1));
//if (ppd->get_n_event() > 0)
//{
// if (dt_try < dt_did)
// {
// dt_try = dt_did;
// }
// break;
//}
}
iter++;
} while (adaptive && max_err > tolerance);
update_counters(iter);
ppd->t += dt_did;
ppd->swap();
return dt_did;
}
namespace c_rk8_kernel
{
static __global__
void calc_ytemp(int n, int r, int idx, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *ytemp)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
{
ytemp[tid] = y_n[tid];
for (int i = 0; i < r; i++)
{
if (0.0 == dc_a[idx + i])
{
continue;
}
ytemp[tid] += dt * dc_a[idx + i] * dydt[offset + i][tid];
//TODO: the dt factor can be used at the end of the loop
}
}
}
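	// Illustrative variant addressing the TODO above (a sketch, not used by the integrator):
	// accumulate the weighted sum first and apply the dt factor once at the end.
	static __global__
		void calc_ytemp_factored(int n, int r, int idx, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *ytemp)
	{
		const int tid = blockIdx.x * blockDim.x + threadIdx.x;
		if (tid < n)
		{
			var_t acc = 0.0;
			for (int i = 0; i < r; i++)
			{
				if (0.0 == dc_a[idx + i])
				{
					continue;
				}
				// Sum a_{r,i} * f_i first; dt is multiplied in only once below.
				acc += dc_a[idx + i] * dydt[offset + i][tid];
			}
			ytemp[tid] = y_n[tid] + dt * acc;
		}
	}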
static __global__
void calc_y_np1(int n, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *y_np1)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
{
y_np1[tid] = y_n[tid];
for (int i = 0; i < 11; i++)
{
if (0.0 == dc_b[i])
{
continue;
}
y_np1[tid] += dt * dc_b[i] * dydt[offset + i][tid];
}
}
}
static __global__
void calc_error(int n, const var_t *f0, const var_t *f10, const var_t *f11, const var_t *f12, var_t *err)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
{
err[tid] = (f0[tid] + f10[tid] - f11[tid] - f12[tid]);
}
}
} /* c_rk8_kernel */
c_rungekutta8::c_rungekutta8(pp_disk *ppd, ttt_t dt, bool adaptive, var_t tolerance, computing_device_t comp_dev) :
integrator(ppd, dt, adaptive, tolerance, (adaptive ? 13 : 11), comp_dev)
{
name = "c_Runge-Kutta-Fehlberg8";
short_name = "cRKF8";
order = 7;
if (COMPUTING_DEVICE_GPU == comp_dev)
{
redutilcu::copy_constant_to_device(dc_a, a, sizeof(a));
redutilcu::copy_constant_to_device(dc_b, b, sizeof(b));
redutilcu::copy_constant_to_device(dc_bh, bh, sizeof(bh));
redutilcu::copy_constant_to_device(dc_c, c, sizeof(c));
}
}
c_rungekutta8::~c_rungekutta8()
{
}
void c_rungekutta8::call_kernel_calc_ytemp(int n_var, int r)
{
static int idx_array[] = {0, 1, 2, 4, 7, 11, 16, 22, 29, 37, 46, 56, 67};
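	// idx_array[r] is the starting offset of row r in the flattened tableau a[] copied into dc_a,
	// so dc_a[idx_array[r] + i] is the coefficient a_{r,i} used by stage r in the kernel below.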
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t** dydt = (var_t**)d_dydt;
var_t* ytmp = (var_t*)ytemp[i];
c_rk8_kernel::calc_ytemp<<<grid, block>>>(n_var, r, idx_array[r], i*r_max, dt_try, y_n, dydt, ytmp);
cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaSuccess != cudaStatus)
{
throw string("c_rk8_kernel::calc_ytemp failed");
}
}
}
void c_rungekutta8::call_kernel_calc_error(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t* f0 = (var_t*)dydx[i][0];
var_t* f10 = (var_t*)dydx[i][10];
var_t* f11 = (var_t*)dydx[i][11];
var_t* f12 = (var_t*)dydx[i][12];
c_rk8_kernel::calc_error<<<grid, block>>>(n_var, f0, f10, f11, f12, err[i]);
cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaSuccess != cudaStatus)
{
throw string("c_rk8_kernel::calc_error failed");
}
}
}
void c_rungekutta8::call_kernel_calc_y_np1(int n_var)
{
for (int i = 0; i < 2; i++)
{
var_t* y_n = (var_t*)ppd->sim_data->y[i];
var_t** dydt = (var_t**)d_dydt;
var_t* y_np1 = (var_t*)ppd->sim_data->yout[i];
c_rk8_kernel::calc_y_np1<<<grid, block>>>(n_var, i*r_max, dt_try, y_n, dydt, y_np1);
cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaSuccess != cudaStatus)
{
throw string("c_rk8_kernel::calc_y_np1_kernel failed");
}
}
}
ttt_t c_rungekutta8::step()
{
const int n_body_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total();
const int n_var_total = NDIM * n_body_total;
if (COMPUTING_DEVICE_GPU == comp_dev)
{
// Set the kernel launch parameters
calc_grid(n_var_total, THREADS_PER_BLOCK);
}
// Calculate initial differentials and store them into dydx[][0]
int r = 0;
ttt_t ttemp = ppd->t + c[r] * dt_try;
	// Calculate f0 = f(tn, yn) and store it in dydx[][0]
const vec_t *coor = ppd->sim_data->y[0];
const vec_t *velo = ppd->sim_data->y[1];
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, coor, velo, dydx[i][r]);
}
var_t max_err = 0.0;
int iter = 0;
do
{
dt_did = dt_try;
		// Calculate f1 = f(tn + c1 * dt, yn + a10 * dt * f0) = dydx[][1]
		// ...
		// Calculate f10 = f(tn + c10 * dt, yn + a10,0 * dt * f0 + ...) = dydx[][10]
for (r = 1; r <= 10; r++)
{
ttemp = ppd->t + c[r] * dt_try;
call_kernel_calc_ytemp(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
// y_(n+1) = yn + dt*(b0*f0 + b5*f5 + b6*f6 + b7*f7 + b8*f8 + b9*f9 + b10*f10) + O(dt^8)
call_kernel_calc_y_np1(n_var_total);
if (adaptive)
{
			// Calculate f11 = f(tn + c11 * dt, yn + ...) = dydx[][11]
			// Calculate f12 = f(tn + c12 * dt, yn + ...) = dydx[][12]
for (r = 11; r < r_max; r++)
{
ttemp = ppd->t + c[r] * dt_try;
call_kernel_calc_ytemp(n_var_total, r);
for (int i = 0; i < 2; i++)
{
ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], dydx[i][r]);
}
}
int n_var = 0;
if (ppd->get_ups())
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_prime_massive());
}
else
{
n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_total() : ppd->n_bodies->get_n_massive());
}
call_kernel_calc_error(n_var);
max_err = get_max_error(n_var, LAMBDA);
dt_try *= 0.9 * pow(tolerance / max_err, 1.0/(order+1));
if (ppd->get_n_event() > 0)
{
if (dt_try < dt_did)
{
dt_try = dt_did;
}
break;
}
}
iter++;
} while (adaptive && max_err > tolerance);
update_counters(iter);
ppd->t += dt_did;
ppd->swap();
return dt_did;
}
#undef LAMBDA
|
af2c2d6204d10f40527ae9956cf22054be3df8a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include "thrust\host_vector.h"
#include "thrust\device_vector.h"
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
	int computeCapabilityMajor;
	int computeCapabilityMinor;
	hipDeviceGetAttribute(&computeCapabilityMajor, hipDeviceAttributeComputeCapabilityMajor, 0);
	hipDeviceGetAttribute(&computeCapabilityMinor, hipDeviceAttributeComputeCapabilityMinor, 0);
	printf("Compute Version: %d.%d\n", computeCapabilityMajor, computeCapabilityMinor);
int maxGridDimX;
int maxGridDimY;
int maxGridDimZ;
hipDeviceGetAttribute(&maxGridDimX, hipDeviceAttributeMaxGridDimX, 0);
hipDeviceGetAttribute(&maxGridDimY, hipDeviceAttributeMaxGridDimY, 0);
hipDeviceGetAttribute(&maxGridDimZ, hipDeviceAttributeMaxGridDimZ, 0);
printf("Max Grid Dimensions: (%d, %d, %d)\n", maxGridDimX, maxGridDimY, maxGridDimZ);
int maxBlockDimX;
int maxBlockDimY;
int maxBlockDimZ;
hipDeviceGetAttribute(&maxBlockDimX, hipDeviceAttributeMaxBlockDimX, 0);
hipDeviceGetAttribute(&maxBlockDimY, hipDeviceAttributeMaxBlockDimY, 0);
hipDeviceGetAttribute(&maxBlockDimZ, hipDeviceAttributeMaxBlockDimZ, 0);
printf("Max Block Dimensions: (%d, %d, %d)\n", maxBlockDimX, maxBlockDimY, maxBlockDimZ);
int maxThreads;
hipDeviceGetAttribute(&maxThreads, hipDeviceAttributeMaxThreadsPerBlock, 0);
printf("Max Threads per Block: %d\n", maxThreads);
system("pause");
}
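/*
 * Alternative sketch (an assumption, not part of the original program): the same information can
 * be read in a single call through hipGetDeviceProperties, e.g.
 *
 *     hipDeviceProp_t prop;
 *     hipGetDeviceProperties(&prop, 0);
 *     printf("Compute Version: %d.%d\n", prop.major, prop.minor);
 *     printf("Max Threads per Block: %d\n", prop.maxThreadsPerBlock);
 */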
| af2c2d6204d10f40527ae9956cf22054be3df8a6.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include "thrust\host_vector.h"
#include "thrust\device_vector.h"
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
	int computeCapabilityMajor;
	int computeCapabilityMinor;
	cudaDeviceGetAttribute(&computeCapabilityMajor, cudaDevAttrComputeCapabilityMajor, 0);
	cudaDeviceGetAttribute(&computeCapabilityMinor, cudaDevAttrComputeCapabilityMinor, 0);
	printf("Compute Version: %d.%d\n", computeCapabilityMajor, computeCapabilityMinor);
int maxGridDimX;
int maxGridDimY;
int maxGridDimZ;
cudaDeviceGetAttribute(&maxGridDimX, cudaDevAttrMaxGridDimX, 0);
cudaDeviceGetAttribute(&maxGridDimY, cudaDevAttrMaxGridDimY, 0);
cudaDeviceGetAttribute(&maxGridDimZ, cudaDevAttrMaxGridDimZ, 0);
printf("Max Grid Dimensions: (%d, %d, %d)\n", maxGridDimX, maxGridDimY, maxGridDimZ);
int maxBlockDimX;
int maxBlockDimY;
int maxBlockDimZ;
cudaDeviceGetAttribute(&maxBlockDimX, cudaDevAttrMaxBlockDimX, 0);
cudaDeviceGetAttribute(&maxBlockDimY, cudaDevAttrMaxBlockDimY, 0);
cudaDeviceGetAttribute(&maxBlockDimZ, cudaDevAttrMaxBlockDimZ, 0);
printf("Max Block Dimensions: (%d, %d, %d)\n", maxBlockDimX, maxBlockDimY, maxBlockDimZ);
int maxThreads;
cudaDeviceGetAttribute(&maxThreads, cudaDevAttrMaxThreadsPerBlock, 0);
printf("Max Threads per Block: %d\n", maxThreads);
system("pause");
}
|
640b55c3c73fe8486d6e9331f1554e08f9e03923.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************//**
* @file Conv_GPU_kernel.cpp
* @brief Module for performing Autocorrelation on the GPU using
* convolution. The module takes input data, width and height and returns
* the output matrix with the time taken.
* @author Ian Glass
* @date 14/10/2013
*******************************************************************************
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*******************************************************************************/
/* Includes ------------------------------------------------------------------*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include <helper_functions.h> // helper functions for SDK examples
/* Global Variables ----------------------------------------------------------*/
typedef unsigned int uint;
typedef unsigned char uchar;
/* Defines the maximum number of threads per block */
#define thread_limit 512
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
/*-----------------------------------------------------------*/
/**
* @brief Addition of complex values on the GPU.
 * @param  a,b The two complex operands to add.
 * @retval The complex sum a + b.
*/
static __device__ __host__ inline hipDoubleComplex ComplexAdd(hipDoubleComplex a, hipDoubleComplex b) {
hipDoubleComplex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
/*-----------------------------------------------------------*/
/**
* @brief Multiplication of complex values on the GPU.
 * @param  a,b The two complex operands to multiply.
 * @retval The complex product a * b.
*/
static __device__ __host__ inline hipDoubleComplex ComplexMul(hipDoubleComplex a, hipDoubleComplex b) {
hipDoubleComplex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
/*-----------------------------------------------------------*/
/**
* @brief Computes convolution on the GPU.
 * @param  The input signal, the filter kernel, their common length and the output buffer.
* @retval None
*/
__global__ void Convolve(hipDoubleComplex *signal, hipDoubleComplex *filter_kernel, int size, hipDoubleComplex *filtered_signal) {
int minRadius = size / 2;
int maxRadius = size - minRadius;
/* Find current position in matrix as 1D index */
	int ID = blockIdx.x * blockDim.x + threadIdx.x;
	/* Guard the rounded-up last block: threads with ID >= size must not touch the output */
	if (ID >= size) {
		return;
	}
	/* Initialise the output element before accumulating */
	filtered_signal[ID].x = filtered_signal[ID].y = 0;
/* Loop over convolution indices */
for (int j = - maxRadius + 1; j <= minRadius; ++j) {
int k = ID + j;
if (k >= 0 && k < size) {
filtered_signal[ID] = ComplexAdd(filtered_signal[ID], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
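/* The loop above evaluates the discrete convolution-style sum
 *     filtered_signal[ID] = sum_{j = -maxRadius+1}^{minRadius} signal[ID + j] * filter_kernel[minRadius - j],
 * where out-of-range indices k = ID + j are simply skipped (zero padding is produced by Pad()). */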
/*-----------------------------------------------------------*/
/**
* @brief Zero-pads the input matrix and shifts it by (width-1)/2
* @param The input matrix, a pointer to the result, the width of the
* input matrix, the width of the output matrix and the height of the
* input matrix.
* @retval None
*/
__global__ void Pad(hipDoubleComplex *input, hipDoubleComplex *output, int width, int width_out, int height) {
	int j = (blockIdx.x * blockDim.x + threadIdx.x)%width_out;
	int i = (blockIdx.x * blockDim.x + threadIdx.x)/width_out;
	/* Guard the rounded-up last block: the output is assumed to hold width_out x (2*height-1)
	   elements, as allocated by Conv_GPU_fn; threads beyond that range must not write. */
	if (i >= 2*height - 1) {
		return;
	}
	output[blockIdx.x * blockDim.x + threadIdx.x].x = 0;
	output[blockIdx.x * blockDim.x + threadIdx.x].y = 0;
//fill matrix with input data for even size
if ((i<height)&&(j<width)) {
output[(j+((width-1)/2))+(i+((height-1)/2))*width_out].x = input[j+i*width].x;
output[(j+((width-1)/2))+(i+((height-1)/2))*width_out].y = input[j+i*width].y;
}
}
/*-----------------------------------------------------------*/
/**
* @brief Main entry point for this file.
* @param The matrix to convolute, the matrix width and height
* and a pointer to the processing time.
* @retval None
*/
extern "C" hipDoubleComplex *Conv_GPU_fn(int argc, char **argv, hipDoubleComplex *data, int width, int height, float *time) {
int width_out = width*2-1;
int height_out = height*2-1;
printf("\n%s Starting...", argv[0]);
/* use command-line specified CUDA device, otherwise use device with highest Gflops/s */
int devID = findCudaDevice(argc, (const char **)argv);
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
int size_padded = width_out*height_out;
/* Set thread conditions */
int num_threads = thread_limit;
if (size_padded < thread_limit) {
num_threads = size_padded;
}
unsigned int mem_size = sizeof(hipDoubleComplex)*size_padded;
/* setup execution parameters */
/* calculate required grid dimensions */
int grid_size = size_padded/thread_limit+1;
dim3 grid(grid_size, 1, 1);
dim3 threads(num_threads, 1, 1);
/* Move input data to device */
hipDoubleComplex *data_in;
checkCudaErrors(hipMalloc((void **)&data_in, sizeof(hipDoubleComplex)*width*height));
checkCudaErrors(hipMemcpy(data_in, data, sizeof(hipDoubleComplex)*width*height, hipMemcpyHostToDevice));
/* Create zero padded matrix */
hipDoubleComplex *data_padded;
checkCudaErrors(hipMalloc((void **)&data_padded, mem_size));
hipLaunchKernelGGL(( Pad), dim3(grid),dim3(threads), 0, 0, data_in,data_padded, width, width_out, height);
/* Perform Convolution */
hipDoubleComplex *h_convolved_signal;
checkCudaErrors(hipMalloc((void **)&h_convolved_signal, mem_size));
hipLaunchKernelGGL(( Convolve), dim3(grid),dim3(threads), 0, 0, data_padded, data_padded, size_padded, h_convolved_signal);
/* Move result back to host */
hipDoubleComplex *conv_out = (hipDoubleComplex*) malloc(mem_size);
checkCudaErrors(hipMemcpy(conv_out, h_convolved_signal, mem_size, hipMemcpyDeviceToHost));
sdkStopTimer(&timer);
*time = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
/* Free memory */
hipFree(data_in);
hipFree(data_padded);
hipFree(h_convolved_signal);
hipDeviceReset();
return(conv_out);
}
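/*
 * Usage sketch (illustrative only; the driver that calls this function is not part of this file).
 * The caller owns the returned buffer and must free() it:
 *
 *     float ms = 0.0f;
 *     hipDoubleComplex *result = Conv_GPU_fn(argc, argv, data, width, height, &ms);
 *     printf("Autocorrelation took %.3f ms\n", ms);
 *     free(result);
 */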
| 640b55c3c73fe8486d6e9331f1554e08f9e03923.cu | /**************************************************************************//**
* @file Conv_GPU_kernel.cpp
* @brief Module for performing Autocorrelation on the GPU using
* convolution. The module takes input data, width and height and returns
* the output matrix with the time taken.
* @author Ian Glass
* @date 14/10/2013
*******************************************************************************
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*******************************************************************************/
/* Includes ------------------------------------------------------------------*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cufft.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include <helper_functions.h> // helper functions for SDK examples
/* Global Variables ----------------------------------------------------------*/
typedef unsigned int uint;
typedef unsigned char uchar;
/* Defines the maximum number of threads per block */
#define thread_limit 512
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
/*-----------------------------------------------------------*/
/**
* @brief Addition of complex values on the GPU.
 * @param  a,b The two complex operands to add.
 * @retval The complex sum a + b.
*/
static __device__ __host__ inline cuDoubleComplex ComplexAdd(cuDoubleComplex a, cuDoubleComplex b) {
cuDoubleComplex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
/*-----------------------------------------------------------*/
/**
* @brief Multiplication of complex values on the GPU.
 * @param  a,b The two complex operands to multiply.
 * @retval The complex product a * b.
*/
static __device__ __host__ inline cuDoubleComplex ComplexMul(cuDoubleComplex a, cuDoubleComplex b) {
cuDoubleComplex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
/*-----------------------------------------------------------*/
/**
* @brief Computes convolution on the GPU.
 * @param  The input signal, the filter kernel, their common length and the output buffer.
* @retval None
*/
__global__ void Convolve(cuDoubleComplex *signal, cuDoubleComplex *filter_kernel, int size, cuDoubleComplex *filtered_signal) {
int minRadius = size / 2;
int maxRadius = size - minRadius;
/* Find current position in matrix as 1D index */
	int ID = blockIdx.x * blockDim.x + threadIdx.x;
	/* Guard the rounded-up last block: threads with ID >= size must not touch the output */
	if (ID >= size) {
		return;
	}
	/* Initialise the output element before accumulating */
	filtered_signal[ID].x = filtered_signal[ID].y = 0;
/* Loop over convolution indices */
for (int j = - maxRadius + 1; j <= minRadius; ++j) {
int k = ID + j;
if (k >= 0 && k < size) {
filtered_signal[ID] = ComplexAdd(filtered_signal[ID], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
/*-----------------------------------------------------------*/
/**
* @brief Zero-pads the input matrix and shifts it by (width-1)/2
* @param The input matrix, a pointer to the result, the width of the
* input matrix, the width of the output matrix and the height of the
* input matrix.
* @retval None
*/
__global__ void Pad(cuDoubleComplex *input, cuDoubleComplex *output, int width, int width_out, int height) {
	int j = (blockIdx.x * blockDim.x + threadIdx.x)%width_out;
	int i = (blockIdx.x * blockDim.x + threadIdx.x)/width_out;
	/* Guard the rounded-up last block: the output is assumed to hold width_out x (2*height-1)
	   elements, as allocated by Conv_GPU_fn; threads beyond that range must not write. */
	if (i >= 2*height - 1) {
		return;
	}
	output[blockIdx.x * blockDim.x + threadIdx.x].x = 0;
	output[blockIdx.x * blockDim.x + threadIdx.x].y = 0;
//fill matrix with input data for even size
if ((i<height)&&(j<width)) {
output[(j+((width-1)/2))+(i+((height-1)/2))*width_out].x = input[j+i*width].x;
output[(j+((width-1)/2))+(i+((height-1)/2))*width_out].y = input[j+i*width].y;
}
}
/*-----------------------------------------------------------*/
/**
* @brief Main entry point for this file.
* @param The matrix to convolute, the matrix width and height
* and a pointer to the processing time.
* @retval None
*/
extern "C" cuDoubleComplex *Conv_GPU_fn(int argc, char **argv, cuDoubleComplex *data, int width, int height, float *time) {
int width_out = width*2-1;
int height_out = height*2-1;
printf("\n%s Starting...", argv[0]);
/* use command-line specified CUDA device, otherwise use device with highest Gflops/s */
int devID = findCudaDevice(argc, (const char **)argv);
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
int size_padded = width_out*height_out;
/* Set thread conditions */
int num_threads = thread_limit;
if (size_padded < thread_limit) {
num_threads = size_padded;
}
unsigned int mem_size = sizeof(cuDoubleComplex)*size_padded;
/* setup execution parameters */
/* calculate required grid dimensions */
int grid_size = size_padded/thread_limit+1;
dim3 grid(grid_size, 1, 1);
dim3 threads(num_threads, 1, 1);
/* Move input data to device */
cuDoubleComplex *data_in;
checkCudaErrors(cudaMalloc((void **)&data_in, sizeof(cuDoubleComplex)*width*height));
checkCudaErrors(cudaMemcpy(data_in, data, sizeof(cuDoubleComplex)*width*height, cudaMemcpyHostToDevice));
/* Create zero padded matrix */
cuDoubleComplex *data_padded;
checkCudaErrors(cudaMalloc((void **)&data_padded, mem_size));
Pad<<<grid,threads>>>(data_in,data_padded, width, width_out, height);
/* Perform Convolution */
cuDoubleComplex *h_convolved_signal;
checkCudaErrors(cudaMalloc((void **)&h_convolved_signal, mem_size));
Convolve<<<grid,threads>>>(data_padded, data_padded, size_padded, h_convolved_signal);
/* Move result back to host */
cuDoubleComplex *conv_out = (cuDoubleComplex*) malloc(mem_size);
checkCudaErrors(cudaMemcpy(conv_out, h_convolved_signal, mem_size, cudaMemcpyDeviceToHost));
sdkStopTimer(&timer);
*time = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
/* Free memory */
cudaFree(data_in);
cudaFree(data_padded);
cudaFree(h_convolved_signal);
cudaDeviceReset();
return(conv_out);
}
|
93fedad419a908f65ff69109426a2efd17067eb3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
#include <common/book.h>
#ifndef BLOCK_SIZE
#define BLOCK_SIZE (int)100
#endif
#define SIZE (int)( BLOCK_SIZE*1024*1024 )
static const int CHAR_SIZE = sizeof(unsigned char);
static const int NCHARS = SIZE / CHAR_SIZE;
#ifndef NTHREADS
#define NTHREADS 256
#endif
#ifndef NBLOCKS
#define NBLOCKS 1024
#endif
#if NCHARS < (NTHREADS * 1024)
#undef NBLOCKS
#define NBLOCKS (int)( ( NCHARS + NTHREADS - 1 ) / NTHREADS )
#endif
#define N_MAX_CHAR 256 // chars
#if N_MAX_CHAR != NTHREADS
#undef NTHREADS
#define NTHREADS N_MAX_CHAR
#endif
#define CPU_NOW() (std::chrono::system_clock::now())
#ifdef USE_GPU
__global__ void buff_to_histo( unsigned char*, unsigned int* );
#endif
int main (void) {
unsigned char *buffer = (unsigned char*)big_random_block( SIZE );
unsigned int histo[N_MAX_CHAR]{0};
/* Timer */
float elapsedTime;
#ifndef USE_GPU
///////////////////////////////////////////
// //////////////// CPU //////////////// //
///////////////////////////////////////////
auto t1 = CPU_NOW();
for (int i=0; i<SIZE; ++i){
++histo[ buffer[i] ];
}
auto t2 = CPU_NOW();
elapsedTime = std::chrono::duration<float, std::milli> (t2 - t1).count();
#else
//////////////////////////////////////////
// //////////////// GPU /////////////// //
//////////////////////////////////////////
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
unsigned char* dev_buffer;
unsigned int* dev_histo;
HANDLE_ERROR( hipMalloc( (void**)&dev_buffer, SIZE ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_histo,
N_MAX_CHAR * sizeof(int) ) );
HANDLE_ERROR( hipMemcpy( dev_buffer, buffer, SIZE, hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemset( dev_histo, 0, N_MAX_CHAR * sizeof(int) ) );
dim3 dimGrid( NBLOCKS, 1, 1 );
dim3 dimBlocks( NTHREADS, 1, 1 );
hipLaunchKernelGGL(( buff_to_histo) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, dev_buffer, dev_histo );
hipDeviceSynchronize();
hipMemcpy(histo, dev_histo,
N_MAX_CHAR * sizeof(int),
hipMemcpyDeviceToHost);
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
HANDLE_ERROR( hipEventElapsedTime ( &elapsedTime, start, stop ) );
HANDLE_ERROR( hipFree( dev_buffer ) );
HANDLE_ERROR( hipFree( dev_histo ) );
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
#endif // <-- USE_GPU
long countHisto = 0;
for (int i=0; i<N_MAX_CHAR; ++i){
countHisto += histo[i];
}
free(buffer);
if ( countHisto == NCHARS ) {
printf( "Work done, time consummed: %.2f ms.\n", elapsedTime );
} else {
printf( "\nNo match of Histogram count(%lo) and the Size of data block(%i).\n",
countHisto, NCHARS );
throw std::runtime_error("No match of Histogram count and the Size of data block.\n");
}
return 0;
}
#ifdef USE_GPU
#ifdef USE_SHARED_MEM
__global__ void buff_to_histo( unsigned char* buffer, unsigned int* histo ) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ unsigned int sub_histo[NTHREADS];
    /* IMPORTANT: shared memory is not zero-initialized, so each thread must clear its bin first */
sub_histo[threadIdx.x] = 0;
__syncthreads();
while ( tid < NCHARS ) {
unsigned char ascii = buffer[tid];
atomicAdd( &sub_histo[ascii], 1 );
tid += blockDim.x * gridDim.x;
}
__syncthreads(); // Wait for all sub_histo calculated
/* Cumulate sub_histos */
atomicAdd( &histo[threadIdx.x], sub_histo[threadIdx.x] );
}
#else
__global__ void buff_to_histo( unsigned char* buffer, unsigned int* histo ) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
while ( tid < NCHARS ) {
unsigned char ascii = buffer[tid];
atomicAdd( &histo[ascii], 1 );
tid += blockDim.x * gridDim.x;
}
}
#endif // <-- USE_SHARED_MEM
#endif // <-- USE_GPU
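/*
 * Build note (an assumption, not taken from the original build files): the GPU path and the
 * shared-memory kernel are selected purely by preprocessor switches, e.g.
 *
 *     hipcc -DUSE_GPU -DUSE_SHARED_MEM -DBLOCK_SIZE=100 histogram.hip -o histogram
 *
 * Without -DUSE_GPU the program falls back to the single-threaded CPU histogram.
 */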
| 93fedad419a908f65ff69109426a2efd17067eb3.cu | #include <iostream>
#include <chrono>
#include <cuda.h>
#include <common/book.h>
#ifndef BLOCK_SIZE
#define BLOCK_SIZE (int)100
#endif
#define SIZE (int)( BLOCK_SIZE*1024*1024 )
static const int CHAR_SIZE = sizeof(unsigned char);
static const int NCHARS = SIZE / CHAR_SIZE;
#ifndef NTHREADS
#define NTHREADS 256
#endif
#ifndef NBLOCKS
#define NBLOCKS 1024
#endif
#if NCHARS < (NTHREADS * 1024)
#undef NBLOCKS
#define NBLOCKS (int)( ( NCHARS + NTHREADS - 1 ) / NTHREADS )
#endif
#define N_MAX_CHAR 256 // chars
#if N_MAX_CHAR != NTHREADS
#undef NTHREADS
#define NTHREADS N_MAX_CHAR
#endif
#define CPU_NOW() (std::chrono::system_clock::now())
#ifdef USE_GPU
__global__ void buff_to_histo( unsigned char*, unsigned int* );
#endif
int main (void) {
unsigned char *buffer = (unsigned char*)big_random_block( SIZE );
unsigned int histo[N_MAX_CHAR]{0};
/* Timer */
float elapsedTime;
#ifndef USE_GPU
///////////////////////////////////////////
// //////////////// CPU //////////////// //
///////////////////////////////////////////
auto t1 = CPU_NOW();
for (int i=0; i<SIZE; ++i){
++histo[ buffer[i] ];
}
auto t2 = CPU_NOW();
elapsedTime = std::chrono::duration<float, std::milli> (t2 - t1).count();
#else
//////////////////////////////////////////
// //////////////// GPU /////////////// //
//////////////////////////////////////////
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
unsigned char* dev_buffer;
unsigned int* dev_histo;
HANDLE_ERROR( cudaMalloc( (void**)&dev_buffer, SIZE ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_histo,
N_MAX_CHAR * sizeof(int) ) );
HANDLE_ERROR( cudaMemcpy( dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemset( dev_histo, 0, N_MAX_CHAR * sizeof(int) ) );
dim3 dimGrid( NBLOCKS, 1, 1 );
dim3 dimBlocks( NTHREADS, 1, 1 );
buff_to_histo <<< dimGrid, dimBlocks >>> ( dev_buffer, dev_histo );
cudaDeviceSynchronize();
cudaMemcpy(histo, dev_histo,
N_MAX_CHAR * sizeof(int),
cudaMemcpyDeviceToHost);
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime ( &elapsedTime, start, stop ) );
HANDLE_ERROR( cudaFree( dev_buffer ) );
HANDLE_ERROR( cudaFree( dev_histo ) );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
#endif // <-- USE_GPU
long countHisto = 0;
for (int i=0; i<N_MAX_CHAR; ++i){
countHisto += histo[i];
}
free(buffer);
if ( countHisto == NCHARS ) {
printf( "Work done, time consummed: %.2f ms.\n", elapsedTime );
} else {
printf( "\nNo match of Histogram count(%lo) and the Size of data block(%i).\n",
countHisto, NCHARS );
throw std::runtime_error("No match of Histogram count and the Size of data block.\n");
}
return 0;
}
#ifdef USE_GPU
#ifdef USE_SHARED_MEM
__global__ void buff_to_histo( unsigned char* buffer, unsigned int* histo ) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ unsigned int sub_histo[NTHREADS];
    /* IMPORTANT: shared memory is not zero-initialized, so each thread must clear its bin first */
sub_histo[threadIdx.x] = 0;
__syncthreads();
while ( tid < NCHARS ) {
unsigned char ascii = buffer[tid];
atomicAdd( &sub_histo[ascii], 1 );
tid += blockDim.x * gridDim.x;
}
__syncthreads(); // Wait for all sub_histo calculated
/* Cumulate sub_histos */
atomicAdd( &histo[threadIdx.x], sub_histo[threadIdx.x] );
}
#else
__global__ void buff_to_histo( unsigned char* buffer, unsigned int* histo ) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
while ( tid < NCHARS ) {
unsigned char ascii = buffer[tid];
atomicAdd( &histo[ascii], 1 );
tid += blockDim.x * gridDim.x;
}
}
#endif // <-- USE_SHARED_MEM
#endif // <-- USE_GPU
|
834ac0d78463fb955ea0bdcd10720440c8d37901.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file spatial_transformer.cu
* \brief
* \author Wei Wu
*/
#include "./spatial_transformer-inl.h"
#include <algorithm>
#if MXNET_USE_CUDNN == 1
#include "./cudnn_spatial_transformer-inl.h"
#endif // MXNET_USE_CUDNN
namespace mshadow {
template <typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
template<typename DType>
__global__ void
/*
* In order to not generate the code that uses too many
* registers (resulting in too many resources requested
* error) we need to tell the compiler that we will be
* launching this kernel with cuda::kMaxThreadsPerBlock
* threads per block. Setting __launch_bounds__ ensures
* that such configuration can always be launched.
*/
__launch_bounds__(cuda::kMaxThreadsPerBlock, 1)
BilinearSamplingForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w - 1) && between(top_left_y, 0, i_h - 1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out + out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
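/* The kernel above implements standard bilinear interpolation: with dx = x_real - top_left_x and
 * dy = y_real - top_left_y, the output is
 *     out = v_tl*(1-dy)*(1-dx) + v_tr*(1-dy)*dx + v_bl*dy*(1-dx) + v_br*dy*dx,
 * where samples falling outside the input image are treated as zero. */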
/*
* In order to not generate the code that uses too many
* registers (resulting in too many resources requested
* error) we need to tell the compiler that we will be
* launching this kernel with cuda::kMaxThreadsPerBlock
* threads per block. Setting __launch_bounds__ ensures
* that such configuration can always be launched.
*/
template <typename DType>
__global__ void __launch_bounds__(cuda::kMaxThreadsPerBlock, 1)
BilinearSamplingBackwardKernel(const int i_c,
const int i_h,
const int i_w,
const DType* grad,
const DType* data,
const int o_n,
const int o_c,
const int o_h,
const int o_w,
DType* g_input,
DType* grid_src) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) {
atomicAdd((g_input + data_index), *(grad + grad_index) * top_left_y_w * top_left_x_w);
top_left_v = *(data + data_index);
}
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) {
atomicAdd((g_input + data_index + 1),
*(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w));
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) {
atomicAdd((g_input + data_index + i_w),
*(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w);
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) {
atomicAdd((g_input + data_index + i_w + 1),
*(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w));
bottom_right_v = *(data + data_index + i_w + 1);
}
      // calc weight grad of top_left_w; multiplying by -1 gives the grad of grid_src
top_left_y_gw -= *(grad + grad_index) *
(top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w);
top_left_x_gw -= *(grad + grad_index) *
(bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w);
}
// calc grid_src grad
*(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2;
*(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2;
}
}
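/* Gradient notes: the input gradient scatters each output gradient to the four neighbouring input
 * pixels with the same bilinear weights (hence the atomicAdd calls), while the grid gradient is the
 * derivative of those weights with respect to the sampling position, rescaled by (i_h-1)/2 and
 * (i_w-1)/2 because the grid coordinates are normalised to [-1, 1]. */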
template <typename DType>
inline void BilinearSamplingForward(const Tensor<gpu, 4, DType>& output,
const Tensor<gpu, 4, DType>& input,
const Tensor<gpu, 3, DType> grid_src) {
DType* out = output.dptr_;
const DType* data = input.dptr_;
const DType* grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward");
hipStream_t stream = Stream<gpu>::GetStream(output.stream_);
hipLaunchKernelGGL(( BilinearSamplingForwardKernel<DType>), dim3(num_blocks), dim3(threads_per_block), 0, stream,
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingForwardKernel);
}
template <typename DType>
inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType>& input_grad,
const Tensor<gpu, 3, DType>& grid_src_data,
const Tensor<gpu, 4, DType>& output_grad,
const Tensor<gpu, 4, DType>& input_data) {
DType* g_input = input_grad.dptr_;
DType* grid_src = grid_src_data.dptr_;
const DType* grad = output_grad.dptr_;
const DType* data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2),
o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block =
(output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward");
hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
hipLaunchKernelGGL(( BilinearSamplingBackwardKernel<DType>), dim3(num_blocks), dim3(threads_per_block), 0, stream,
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src);
MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingBackwardKernel);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template <>
Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) {
Operator* op = nullptr;
#if MXNET_USE_CUDNN == 1
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
if (param.cudnn_off.has_value() && param.cudnn_off.value()) {
op = new SpatialTransformerOp<gpu, DType>(param);
} else {
op = new CuDNNSpatialTransformerOp<DType>(param);
}
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SpatialTransformerOp<gpu, DType>(param); })
#endif // MXNET_USE_CUDNN
return op;
}
} // namespace op
} // namespace mxnet
| 834ac0d78463fb955ea0bdcd10720440c8d37901.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file spatial_transformer.cu
* \brief
* \author Wei Wu
*/
#include "./spatial_transformer-inl.h"
#include <algorithm>
#if MXNET_USE_CUDNN == 1
#include "./cudnn_spatial_transformer-inl.h"
#endif // MXNET_USE_CUDNN
namespace mshadow {
template <typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
template<typename DType>
__global__ void
/*
* In order not to generate code that uses too many
* registers (resulting in a "too many resources requested"
* error) we need to tell the compiler that we will be
* launching this kernel with cuda::kMaxThreadsPerBlock
* threads per block. Setting __launch_bounds__ ensures
* that such a configuration can always be launched.
*/
__launch_bounds__(cuda::kMaxThreadsPerBlock, 1)
BilinearSamplingForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
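// grid values lie in [-1, 1]: x is stored in the first o_h * o_w block, y in the second,
// and both are mapped here to input pixel coordinates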
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
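// bilinear weights: top_left_y_w / top_left_x_w weight the top-left neighbour;
// neighbours outside the input contribute zero through the between() guards below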
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w - 1) && between(top_left_y, 0, i_h - 1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out + out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
/*
* In order not to generate code that uses too many
* registers (resulting in a "too many resources requested"
* error) we need to tell the compiler that we will be
* launching this kernel with cuda::kMaxThreadsPerBlock
* threads per block. Setting __launch_bounds__ ensures
* that such a configuration can always be launched.
*/
template <typename DType>
__global__ void __launch_bounds__(cuda::kMaxThreadsPerBlock, 1)
BilinearSamplingBackwardKernel(const int i_c,
const int i_h,
const int i_w,
const DType* grad,
const DType* data,
const int o_n,
const int o_c,
const int o_h,
const int o_w,
DType* g_input,
DType* grid_src) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc the 4 vertex values in the input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) {
atomicAdd((g_input + data_index), *(grad + grad_index) * top_left_y_w * top_left_x_w);
top_left_v = *(data + data_index);
}
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) {
atomicAdd((g_input + data_index + 1),
*(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w));
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) {
atomicAdd((g_input + data_index + i_w),
*(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w);
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) {
atomicAdd((g_input + data_index + i_w + 1),
*(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w));
bottom_right_v = *(data + data_index + i_w + 1);
}
// calc weight grad of top_left_w; multiplying by -1 gives the grad of grid_src
top_left_y_gw -= *(grad + grad_index) *
(top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w);
top_left_x_gw -= *(grad + grad_index) *
(bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w);
}
// calc grid_src grad
*(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2;
*(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2;
}
}
template <typename DType>
inline void BilinearSamplingForward(const Tensor<gpu, 4, DType>& output,
const Tensor<gpu, 4, DType>& input,
const Tensor<gpu, 3, DType> grid_src) {
DType* out = output.dptr_;
const DType* data = input.dptr_;
const DType* grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
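// use a 2-D grid so that more than kMaxGridDim blocks can be launched;
// the kernel strides by blockDim.x * gridDim.x * gridDim.y to cover all elements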
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward");
cudaStream_t stream = Stream<gpu>::GetStream(output.stream_);
BilinearSamplingForwardKernel<DType><<<num_blocks, threads_per_block, 0, stream>>>(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingForwardKernel);
}
template <typename DType>
inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType>& input_grad,
const Tensor<gpu, 3, DType>& grid_src_data,
const Tensor<gpu, 4, DType>& output_grad,
const Tensor<gpu, 4, DType>& input_data) {
DType* g_input = input_grad.dptr_;
DType* grid_src = grid_src_data.dptr_;
const DType* grad = output_grad.dptr_;
const DType* data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2),
o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block =
(output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward");
cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
BilinearSamplingBackwardKernel<DType><<<num_blocks, threads_per_block, 0, stream>>>(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src);
MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingBackwardKernel);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template <>
Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) {
Operator* op = nullptr;
#if MXNET_USE_CUDNN == 1
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
if (param.cudnn_off.has_value() && param.cudnn_off.value()) {
op = new SpatialTransformerOp<gpu, DType>(param);
} else {
op = new CuDNNSpatialTransformerOp<DType>(param);
}
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SpatialTransformerOp<gpu, DType>(param); })
#endif // MXNET_USE_CUDNN
return op;
}
} // namespace op
} // namespace mxnet
|
3752fd0ceb11bbbdd77ebc9833ca4f3f31c9d177.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaOverlay.h"
// cudaOverlay
template<typename T>
__global__ void gpuOverlay( T* input, int inputWidth, int inputHeight, T* output, int outputWidth, int outputHeight, int x0, int y0 )
{
const int input_x = blockIdx.x * blockDim.x + threadIdx.x;
const int input_y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = input_x + x0;
const int y = input_y + y0;
if( input_x >= inputWidth || input_y >= inputHeight || x >= outputWidth || y >= outputHeight )
return;
output[y * outputWidth + x] = input[input_y * inputWidth + input_x];
}
hipError_t cudaOverlay( void* input, size_t inputWidth, size_t inputHeight,
void* output, size_t outputWidth, size_t outputHeight,
imageFormat format, int x, int y )
{
if( !input || !output || inputWidth == 0 || inputHeight == 0 || outputWidth == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
if( x < 0 || y < 0 || x >= outputWidth || y >= outputHeight )
return hipErrorInvalidValue;
if( !imageFormatIsRGB(format) && !imageFormatIsBGR(format) && !imageFormatIsGray(format) )
return hipErrorInvalidValue;
int overlayWidth = inputWidth;
int overlayHeight = inputHeight;
if( x + overlayWidth >= outputWidth )
overlayWidth = outputWidth - x;
if( y + overlayHeight >= outputHeight )
overlayHeight = outputHeight - y;
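// the overlay extent is now clipped to the output bounds; launch one thread per overlay pixel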
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(overlayWidth,blockDim.x), iDivUp(overlayHeight,blockDim.y));
#define launch_overlay(type) \
hipLaunchKernelGGL(( gpuOverlay<type>), dim3(gridDim), dim3(blockDim), 0, 0, (type*)input, inputWidth, inputHeight, (type*)output, outputWidth, outputHeight, x, y)
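// expand launch_overlay() with the pixel type that matches the image format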
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
launch_overlay(uchar3);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
launch_overlay(uchar4);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
launch_overlay(float3);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
launch_overlay(float4);
else if( format == IMAGE_GRAY8 )
launch_overlay(uint8_t);
else if( format == IMAGE_GRAY32F )
launch_overlay(float);
return hipGetLastError();
}
//----------------------------------------------------------------------------
template<typename T>
__global__ void gpuRectFill( T* input, T* output, int width, int height,
float4* rects, int numRects, float4 color )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
T px = input[ y * width + x ];
const float fx = x;
const float fy = y;
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
for( int nr=0; nr < numRects; nr++ )
{
const float4 r = rects[nr];
if( fy >= r.y && fy <= r.w && fx >= r.x && fx <= r.z )
{
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
}
}
output[y * width + x] = px;
}
template<typename T>
__global__ void gpuRectFillBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
hipError_t launchRectFill( T* input, T* output, size_t width, size_t height, float4* rects, int numRects, const float4& color )
{
// if input and output are the same image, then we can use the faster method
// which draws 1 box per kernel, but doesn't copy pixels that aren't inside boxes
if( input == output )
{
for( int n=0; n < numRects; n++ )
{
const int boxWidth = (int)(rects[n].z - rects[n].x);
const int boxHeight = (int)(rects[n].w - rects[n].y);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
hipLaunchKernelGGL(( gpuRectFillBox<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, (int)rects[n].x, (int)rects[n].y, boxWidth, boxHeight, color);
}
}
else
{
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
hipLaunchKernelGGL(( gpuRectFill<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, rects, numRects, color);
}
return hipGetLastError();
}
// cudaRectFill
hipError_t cudaRectFill( void* input, void* output, size_t width, size_t height, imageFormat format, float4* rects, int numRects, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
return hipErrorInvalidValue;
if( format == IMAGE_RGB8 )
return launchRectFill<uchar3>((uchar3*)input, (uchar3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA8 )
return launchRectFill<uchar4>((uchar4*)input, (uchar4*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGB32F )
return launchRectFill<float3>((float3*)input, (float3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA32F )
return launchRectFill<float4>((float4*)input, (float4*)output, width, height, rects, numRects, color);
else
return hipErrorInvalidValue;
}
| 3752fd0ceb11bbbdd77ebc9833ca4f3f31c9d177.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaOverlay.h"
// cudaOverlay
template<typename T>
__global__ void gpuOverlay( T* input, int inputWidth, int inputHeight, T* output, int outputWidth, int outputHeight, int x0, int y0 )
{
const int input_x = blockIdx.x * blockDim.x + threadIdx.x;
const int input_y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = input_x + x0;
const int y = input_y + y0;
if( input_x >= inputWidth || input_y >= inputHeight || x >= outputWidth || y >= outputHeight )
return;
output[y * outputWidth + x] = input[input_y * inputWidth + input_x];
}
cudaError_t cudaOverlay( void* input, size_t inputWidth, size_t inputHeight,
void* output, size_t outputWidth, size_t outputHeight,
imageFormat format, int x, int y )
{
if( !input || !output || inputWidth == 0 || inputHeight == 0 || outputWidth == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
if( x < 0 || y < 0 || x >= outputWidth || y >= outputHeight )
return cudaErrorInvalidValue;
if( !imageFormatIsRGB(format) && !imageFormatIsBGR(format) && !imageFormatIsGray(format) )
return cudaErrorInvalidValue;
int overlayWidth = inputWidth;
int overlayHeight = inputHeight;
if( x + overlayWidth >= outputWidth )
overlayWidth = outputWidth - x;
if( y + overlayHeight >= outputHeight )
overlayHeight = outputHeight - y;
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(overlayWidth,blockDim.x), iDivUp(overlayHeight,blockDim.y));
#define launch_overlay(type) \
gpuOverlay<type><<<gridDim, blockDim>>>((type*)input, inputWidth, inputHeight, (type*)output, outputWidth, outputHeight, x, y)
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
launch_overlay(uchar3);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
launch_overlay(uchar4);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
launch_overlay(float3);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
launch_overlay(float4);
else if( format == IMAGE_GRAY8 )
launch_overlay(uint8_t);
else if( format == IMAGE_GRAY32F )
launch_overlay(float);
return cudaGetLastError();
}
//----------------------------------------------------------------------------
template<typename T>
__global__ void gpuRectFill( T* input, T* output, int width, int height,
float4* rects, int numRects, float4 color )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
T px = input[ y * width + x ];
const float fx = x;
const float fy = y;
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
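// rects are (x1, y1, x2, y2); the colour is alpha-blended into the pixel once for every
// rectangle that contains it, so overlapping rectangles compound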
for( int nr=0; nr < numRects; nr++ )
{
const float4 r = rects[nr];
if( fy >= r.y && fy <= r.w && fx >= r.x && fx <= r.z )
{
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
}
}
output[y * width + x] = px;
}
template<typename T>
__global__ void gpuRectFillBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
cudaError_t launchRectFill( T* input, T* output, size_t width, size_t height, float4* rects, int numRects, const float4& color )
{
// if input and output are the same image, then we can use the faster method
// which draws 1 box per kernel, but doesn't copy pixels that aren't inside boxes
if( input == output )
{
for( int n=0; n < numRects; n++ )
{
const int boxWidth = (int)(rects[n].z - rects[n].x);
const int boxHeight = (int)(rects[n].w - rects[n].y);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
gpuRectFillBox<T><<<gridDim, blockDim>>>(input, output, width, height, (int)rects[n].x, (int)rects[n].y, boxWidth, boxHeight, color);
}
}
else
{
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
gpuRectFill<T><<<gridDim, blockDim>>>(input, output, width, height, rects, numRects, color);
}
return cudaGetLastError();
}
// cudaRectFill
cudaError_t cudaRectFill( void* input, void* output, size_t width, size_t height, imageFormat format, float4* rects, int numRects, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
return cudaErrorInvalidValue;
if( format == IMAGE_RGB8 )
return launchRectFill<uchar3>((uchar3*)input, (uchar3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA8 )
return launchRectFill<uchar4>((uchar4*)input, (uchar4*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGB32F )
return launchRectFill<float3>((float3*)input, (float3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA32F )
return launchRectFill<float4>((float4*)input, (float4*)output, width, height, rects, numRects, color);
else
return cudaErrorInvalidValue;
}
|
b27b220873bed287ca1d730b0fdfa9948631abd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The code is based on
// https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
#include "paddle/extension.h"
#include "rbox_iou_op.h"
// 2D block with 32 * 16 = 512 threads per block
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 16;
/**
Computes ceil(a / b)
*/
static inline int CeilDiv(const int a, const int b) { return (a + b - 1) / b; }
template <typename T>
__global__ void rbox_iou_cuda_kernel(const int rbox1_num, const int rbox2_num,
const T *rbox1_data_ptr,
const T *rbox2_data_ptr,
T *output_data_ptr) {
// get row_start and col_start
const int rbox1_block_idx = blockIdx.x * blockDim.x;
const int rbox2_block_idx = blockIdx.y * blockDim.y;
const int rbox1_thread_num = min(rbox1_num - rbox1_block_idx, blockDim.x);
const int rbox2_thread_num = min(rbox2_num - rbox2_block_idx, blockDim.y);
__shared__ T block_boxes1[BLOCK_DIM_X * 5];
__shared__ T block_boxes2[BLOCK_DIM_Y * 5];
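// stage a tile of boxes in shared memory (5 values per box - presumably centre x/y, width,
// height and angle of a rotated box) so every thread in the block can reuse them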
// It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
if (threadIdx.x < rbox1_thread_num && threadIdx.y == 0) {
block_boxes1[threadIdx.x * 5 + 0] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 0];
block_boxes1[threadIdx.x * 5 + 1] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 1];
block_boxes1[threadIdx.x * 5 + 2] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 2];
block_boxes1[threadIdx.x * 5 + 3] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 3];
block_boxes1[threadIdx.x * 5 + 4] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 4];
}
// rbox2_thread_num is at most BLOCK_DIM_Y (<= BLOCK_DIM_X), so threadIdx.x can load these
// rows as well, using the same condition as above: threadIdx.y == 0
if (threadIdx.x < rbox2_thread_num && threadIdx.y == 0) {
block_boxes2[threadIdx.x * 5 + 0] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 0];
block_boxes2[threadIdx.x * 5 + 1] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 1];
block_boxes2[threadIdx.x * 5 + 2] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 2];
block_boxes2[threadIdx.x * 5 + 3] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 3];
block_boxes2[threadIdx.x * 5 + 4] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 4];
}
// sync
__syncthreads();
if (threadIdx.x < rbox1_thread_num && threadIdx.y < rbox2_thread_num) {
int offset = (rbox1_block_idx + threadIdx.x) * rbox2_num + rbox2_block_idx +
threadIdx.y;
output_data_ptr[offset] = rbox_iou_single<T>(
block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5);
}
}
#define CHECK_INPUT_GPU(x) \
PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.")
std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor &rbox1,
const paddle::Tensor &rbox2) {
CHECK_INPUT_GPU(rbox1);
CHECK_INPUT_GPU(rbox2);
auto rbox1_num = rbox1.shape()[0];
auto rbox2_num = rbox2.shape()[0];
auto output = paddle::Tensor(paddle::PlaceType::kGPU, {rbox1_num, rbox2_num});
const int blocks_x = CeilDiv(rbox1_num, BLOCK_DIM_X);
const int blocks_y = CeilDiv(rbox2_num, BLOCK_DIM_Y);
dim3 blocks(blocks_x, blocks_y);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
PD_DISPATCH_FLOATING_TYPES(
rbox1.type(), "rbox_iou_cuda_kernel", ([&] {
hipLaunchKernelGGL(( rbox_iou_cuda_kernel<data_t>), dim3(blocks), dim3(threads), 0, rbox1.stream(),
rbox1_num, rbox2_num, rbox1.data<data_t>(), rbox2.data<data_t>(),
output.mutable_data<data_t>());
}));
return {output};
}
| b27b220873bed287ca1d730b0fdfa9948631abd9.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The code is based on
// https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
#include "paddle/extension.h"
#include "rbox_iou_op.h"
// 2D block with 32 * 16 = 512 threads per block
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 16;
/**
Computes ceil(a / b)
*/
static inline int CeilDiv(const int a, const int b) { return (a + b - 1) / b; }
template <typename T>
__global__ void rbox_iou_cuda_kernel(const int rbox1_num, const int rbox2_num,
const T *rbox1_data_ptr,
const T *rbox2_data_ptr,
T *output_data_ptr) {
// get row_start and col_start
const int rbox1_block_idx = blockIdx.x * blockDim.x;
const int rbox2_block_idx = blockIdx.y * blockDim.y;
const int rbox1_thread_num = min(rbox1_num - rbox1_block_idx, blockDim.x);
const int rbox2_thread_num = min(rbox2_num - rbox2_block_idx, blockDim.y);
__shared__ T block_boxes1[BLOCK_DIM_X * 5];
__shared__ T block_boxes2[BLOCK_DIM_Y * 5];
// It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
if (threadIdx.x < rbox1_thread_num && threadIdx.y == 0) {
block_boxes1[threadIdx.x * 5 + 0] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 0];
block_boxes1[threadIdx.x * 5 + 1] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 1];
block_boxes1[threadIdx.x * 5 + 2] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 2];
block_boxes1[threadIdx.x * 5 + 3] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 3];
block_boxes1[threadIdx.x * 5 + 4] =
rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 4];
}
// rbox2_thread_num is at most BLOCK_DIM_Y (<= BLOCK_DIM_X), so threadIdx.x can load these
// rows as well, using the same condition as above: threadIdx.y == 0
if (threadIdx.x < rbox2_thread_num && threadIdx.y == 0) {
block_boxes2[threadIdx.x * 5 + 0] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 0];
block_boxes2[threadIdx.x * 5 + 1] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 1];
block_boxes2[threadIdx.x * 5 + 2] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 2];
block_boxes2[threadIdx.x * 5 + 3] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 3];
block_boxes2[threadIdx.x * 5 + 4] =
rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 4];
}
// sync
__syncthreads();
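// each thread now computes the IoU of one (box1, box2) pair; the result matrix is
// row-major with shape [rbox1_num, rbox2_num]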
if (threadIdx.x < rbox1_thread_num && threadIdx.y < rbox2_thread_num) {
int offset = (rbox1_block_idx + threadIdx.x) * rbox2_num + rbox2_block_idx +
threadIdx.y;
output_data_ptr[offset] = rbox_iou_single<T>(
block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5);
}
}
#define CHECK_INPUT_GPU(x) \
PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.")
std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor &rbox1,
const paddle::Tensor &rbox2) {
CHECK_INPUT_GPU(rbox1);
CHECK_INPUT_GPU(rbox2);
auto rbox1_num = rbox1.shape()[0];
auto rbox2_num = rbox2.shape()[0];
auto output = paddle::Tensor(paddle::PlaceType::kGPU, {rbox1_num, rbox2_num});
const int blocks_x = CeilDiv(rbox1_num, BLOCK_DIM_X);
const int blocks_y = CeilDiv(rbox2_num, BLOCK_DIM_Y);
dim3 blocks(blocks_x, blocks_y);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
PD_DISPATCH_FLOATING_TYPES(
rbox1.type(), "rbox_iou_cuda_kernel", ([&] {
rbox_iou_cuda_kernel<data_t><<<blocks, threads, 0, rbox1.stream()>>>(
rbox1_num, rbox2_num, rbox1.data<data_t>(), rbox2.data<data_t>(),
output.mutable_data<data_t>());
}));
return {output};
}
|
d021f4510b76af834f810717f8a8d15b3b6a4476.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<real, 3>
#define DeviceTensor1 THCDeviceTensor<real, 1>
template <int Dim>
static THCDeviceTensor<real, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<real, Dim>();
}
int inDim = THCTensor_nDimensionLegacyAll(state, t);
if (inDim == Dim) {
return toDeviceTensor<real, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(THCTensor_isContiguous(state, t));
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = THTensor_sizeLegacyNoScalars(t, i);
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= THTensor_sizeLegacyNoScalars(t, i);
}
}
return THCDeviceTensor<real, Dim>(t->data<real>(), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
if (train) {
int64_t nInput = THCTensor_(size)(state, input_, 1);
THCTensor_(resize1d)(state, saveMean_, nInput);
THCTensor_(resize1d)(state, saveStd_, nInput);
}
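// all tensors are handled through 3-D views (batch, channel, flattened spatial);
// weight, bias and the statistics are 1-D (per-channel) tensors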
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);
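// inference normalizes with the stored running statistics; training computes per-batch
// mean/std, saves them in saveMean/saveStd and updates the running buffers using momentum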
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, weight, bias, static_cast<accreal>(eps), static_cast<accreal>(momentum), runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(hipGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_);
DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_);
DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_);
DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(hipGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
| d021f4510b76af834f810717f8a8d15b3b6a4476.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<real, 3>
#define DeviceTensor1 THCDeviceTensor<real, 1>
template <int Dim>
static THCDeviceTensor<real, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<real, Dim>();
}
int inDim = THCTensor_nDimensionLegacyAll(state, t);
if (inDim == Dim) {
return toDeviceTensor<real, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(THCTensor_isContiguous(state, t));
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = THTensor_sizeLegacyNoScalars(t, i);
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= THTensor_sizeLegacyNoScalars(t, i);
}
}
return THCDeviceTensor<real, Dim>(t->data<real>(), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
if (train) {
int64_t nInput = THCTensor_(size)(state, input_, 1);
THCTensor_(resize1d)(state, saveMean_, nInput);
THCTensor_(resize1d)(state, saveStd_, nInput);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, weight, bias, static_cast<accreal>(eps), static_cast<accreal>(momentum), runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(cudaGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_);
DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_);
DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_);
DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(cudaGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
|
52f1f3502a272cc4cf88b1fa903963177c52e368.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdio>
__global__ void init_data_kernel( int n, double* x)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < n )
{
x[i] = n - i;
}
}
__global__ void daxpy_kernel(int n, double a, double * x, double * y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
{
y[i] = a*x[i] + y[i];
}
}
__global__ void check_results_kernel( int n, double correctvalue, double * x )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
{
if ( x[i] != correctvalue )
{
printf("ERROR at index = %d, expected = %f, actual: %f\n",i,correctvalue,x[i]);
}
}
}
void init_host_data( int n, double * x )
{
for (int i=0; i<n; ++i)
{
x[i] = i;
}
}
void init_data(int n, double* x, double* x_d, double* y_d)
{
hipStream_t copy_stream;
hipStream_t compute_stream;
hipStreamCreate(©_stream);
hipStreamCreate(&compute_stream);
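// two streams let the host-to-device copy of x overlap with the on-device initialization of y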
hipMemcpyAsync( x_d, x, n*sizeof(double), hipMemcpyDefault, copy_stream );
hipLaunchKernelGGL(( init_data_kernel), dim3(ceil(n/256)),dim3(256),0,compute_stream, n, y_d);
hipStreamSynchronize(copy_stream);
hipStreamSynchronize(compute_stream);
hipStreamDestroy(compute_stream);
hipStreamDestroy(copy_stream);
}
void daxpy(int n, double a, double* x_d, double* y_d)
{
hipLaunchKernelGGL(( daxpy_kernel), dim3(ceil(n/256)),dim3(256), 0, 0, n,a,x_d,y_d);
hipDeviceSynchronize();
}
void check_results( int n, double correctvalue, double* x_d )
{
hipLaunchKernelGGL(( check_results_kernel), dim3(ceil(n/256)),dim3(256), 0, 0, n,correctvalue,x_d);
}
void run_test(int n)
{
double* x;
double* x_d;
double* y_d;
hipSetDevice(0);
hipHostMalloc((void**) &x, n*sizeof(double));
hipMalloc((void**)&x_d,n*sizeof(double));
hipMalloc((void**)&y_d,n*sizeof(double));
init_host_data(n, x);
init_data(n,x,x_d,y_d);
daxpy(n,1.0,x_d,y_d);
check_results(n, n, y_d);
hipFree(y_d);
hipFree(x_d);
hipHostFree(x);
hipDeviceSynchronize();
}
int main()
{
int n = 1<<22;
run_test(n);
return 0;
}
| 52f1f3502a272cc4cf88b1fa903963177c52e368.cu | /* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdio>
__global__ void init_data_kernel( int n, double* x)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < n )
{
x[i] = n - i;
}
}
__global__ void daxpy_kernel(int n, double a, double * x, double * y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
{
y[i] = a*x[i] + y[i];
}
}
__global__ void check_results_kernel( int n, double correctvalue, double * x )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
{
if ( x[i] != correctvalue )
{
printf("ERROR at index = %d, expected = %f, actual: %f\n",i,correctvalue,x[i]);
}
}
}
void init_host_data( int n, double * x )
{
for (int i=0; i<n; ++i)
{
x[i] = i;
}
}
void init_data(int n, double* x, double* x_d, double* y_d)
{
cudaStream_t copy_stream;
cudaStream_t compute_stream;
cudaStreamCreate(©_stream);
cudaStreamCreate(&compute_stream);
cudaMemcpyAsync( x_d, x, n*sizeof(double), cudaMemcpyDefault, copy_stream );
init_data_kernel<<<ceil(n/256),256,0,compute_stream>>>(n, y_d);
cudaStreamSynchronize(copy_stream);
cudaStreamSynchronize(compute_stream);
cudaStreamDestroy(compute_stream);
cudaStreamDestroy(copy_stream);
}
void daxpy(int n, double a, double* x_d, double* y_d)
{
daxpy_kernel<<<ceil(n/256),256>>>(n,a,x_d,y_d);
cudaDeviceSynchronize();
}
void check_results( int n, double correctvalue, double* x_d )
{
check_results_kernel<<<ceil(n/256),256>>>(n,correctvalue,x_d);
}
void run_test(int n)
{
double* x;
double* x_d;
double* y_d;
cudaSetDevice(0);
cudaMallocHost((void**) &x, n*sizeof(double));
cudaMalloc((void**)&x_d,n*sizeof(double));
cudaMalloc((void**)&y_d,n*sizeof(double));
init_host_data(n, x);
init_data(n,x,x_d,y_d);
daxpy(n,1.0,x_d,y_d);
check_results(n, n, y_d);
cudaFree(y_d);
cudaFree(x_d);
cudaFreeHost(x);
cudaDeviceSynchronize();
}
int main()
{
int n = 1<<22;
run_test(n);
return 0;
}
|
91a90ea6759080fd4aa7d6a56a2ecb84fad0c171.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetrf_batched_smallsq_noshfl.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
extern __shared__ float zdata[];
template<int N, int NPOW2>
__global__ void
sgetrf_batched_smallsq_noshfl_kernel( float** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
float* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
float rA[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
int max_id, rowid = tx;
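// rowid tracks where this thread's row belongs after pivoting; rows are only moved
// physically at the final write-back (lazy swap)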
int linfo = 0;
float rx_abs_max = MAGMA_D_ZERO;
float *sx = (float*)(zdata);
float* dsx = (float*)(sx + blockDim.y * NPOW2);
int* sipiv = (int*)(dsx + blockDim.y * NPOW2);
sx += ty * NPOW2;
dsx += ty * NPOW2;
sipiv += ty * NPOW2;
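// per-matrix shared workspace: sx holds the current pivot row, dsx the absolute values
// used for the pivot search, sipiv the pivot indices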
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
// isamax and find pivot
dsx[ rowid ] = fabs(MAGMA_S_REAL( rA[i] )) + fabs(MAGMA_S_IMAG( rA[i] ));
magmablas_syncwarp();
rx_abs_max = dsx[i];
max_id = i;
#pragma unroll
for(int j = i+1; j < N; j++){
if( dsx[j] > rx_abs_max){
max_id = j;
rx_abs_max = dsx[j];
}
}
linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo;
if(rowid == max_id){
sipiv[i] = max_id;
rowid = i;
#pragma unroll
for(int j = i; j < N; j++){
sx[j] = rA[j];
}
}
else if(rowid == i){
rowid = max_id;
}
magmablas_syncwarp();
reg = MAGMA_S_DIV(MAGMA_S_ONE, sx[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
magmablas_syncwarp();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write
if(tx < N) {
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); // fortran indexing
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
sgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
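// Usage sketch (assuming dA_array, ipiv_array and info_array already hold batchCount device pointers):
//   magma_sgetrf_batched_smallsq_noshfl(n, dA_array, ldda, ipiv_array, info_array, batchCount, queue);
// valid only for 0 <= n <= 32; larger sizes are rejected through magma_xerbla below.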
extern "C" magma_int_t
magma_sgetrf_batched_smallsq_noshfl(
magma_int_t n,
float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if( m == 0) return 0;
const magma_int_t ntcol = magma_get_sgetrf_batched_ntcol(m, n);
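// each thread block factorizes ntcol matrices side by side (block y-dimension),
// with magma_ceilpow2(m) threads in x per matrix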
magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
shmem += ntcol * magma_ceilpow2(m) * sizeof(float);
shmem += ntcol * magma_ceilpow2(m) * sizeof(float);
dim3 threads(magma_ceilpow2(m), ntcol, 1);
const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
switch(m){
case 1:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 2:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 3:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 4:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 5:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 6:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 7:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 8:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 8, magma_ceilpow2( 8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 9:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 10:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 11:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 12:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 13:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 14:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 15:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 16:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 17:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 18:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 19:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 20:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 21:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 22:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 23:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 24:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 25:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 26:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 27:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 28:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 29:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 30:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 31:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 32:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
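// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original MAGMA source). It only
// shows how the wrapper above would typically be called; allocating dA_array,
// ipiv_array and info_array (device arrays of per-matrix pointers/values) and
// creating `queue` are assumed to happen elsewhere, e.g. via magma_malloc and
// magma_queue_create.
//
//   magma_int_t n = 16, batchCount = 1000, ldda = n;
//   magma_int_t err = magma_sgetrf_batched_smallsq_noshfl(
//       n, dA_array, ldda, ipiv_array, info_array, batchCount, queue);
//   // err < 0 flags an illegal argument; per-matrix singularity (U(i,i) == 0)
//   // is reported through info_array once the kernel has completed.
// ---------------------------------------------------------------------------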
| 91a90ea6759080fd4aa7d6a56a2ecb84fad0c171.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetrf_batched_smallsq_noshfl.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
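// ("Lazy swap" means row interchanges are not applied immediately: each thread
//  keeps its logical row index in `rowid`, and rows are written back to their
//  pivoted positions only when the factors are stored at the end.)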
extern __shared__ float zdata[];
template<int N, int NPOW2>
__global__ void
sgetrf_batched_smallsq_noshfl_kernel( float** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
float* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
float rA[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
int max_id, rowid = tx;
int linfo = 0;
float rx_abs_max = MAGMA_D_ZERO;
float *sx = (float*)(zdata);
float* dsx = (float*)(sx + blockDim.y * NPOW2);
int* sipiv = (int*)(dsx + blockDim.y * NPOW2);
sx += ty * NPOW2;
dsx += ty * NPOW2;
sipiv += ty * NPOW2;
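    // Each ty-slice of the thread block factors its own matrix, so every slice
    // gets a private NPOW2-sized window into the shared-memory work buffers.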
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
// isamax and find pivot
dsx[ rowid ] = fabs(MAGMA_S_REAL( rA[i] )) + fabs(MAGMA_S_IMAG( rA[i] ));
magmablas_syncwarp();
rx_abs_max = dsx[i];
max_id = i;
#pragma unroll
for(int j = i+1; j < N; j++){
if( dsx[j] > rx_abs_max){
max_id = j;
rx_abs_max = dsx[j];
}
}
linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo;
if(rowid == max_id){
sipiv[i] = max_id;
rowid = i;
#pragma unroll
for(int j = i; j < N; j++){
sx[j] = rA[j];
}
}
else if(rowid == i){
rowid = max_id;
}
magmablas_syncwarp();
reg = MAGMA_S_DIV(MAGMA_S_ONE, sx[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
magmablas_syncwarp();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write
if(tx < N) {
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); // fortran indexing
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
sgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
                  or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
extern "C" magma_int_t
magma_sgetrf_batched_smallsq_noshfl(
magma_int_t n,
float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if( m == 0) return 0;
const magma_int_t ntcol = magma_get_sgetrf_batched_ntcol(m, n);
magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
shmem += ntcol * magma_ceilpow2(m) * sizeof(float);
shmem += ntcol * magma_ceilpow2(m) * sizeof(float);
dim3 threads(magma_ceilpow2(m), ntcol, 1);
const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
switch(m){
case 1: sgetrf_batched_smallsq_noshfl_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 2: sgetrf_batched_smallsq_noshfl_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 3: sgetrf_batched_smallsq_noshfl_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 4: sgetrf_batched_smallsq_noshfl_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 5: sgetrf_batched_smallsq_noshfl_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 6: sgetrf_batched_smallsq_noshfl_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 7: sgetrf_batched_smallsq_noshfl_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 8: sgetrf_batched_smallsq_noshfl_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 9: sgetrf_batched_smallsq_noshfl_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 10: sgetrf_batched_smallsq_noshfl_kernel<10, magma_ceilpow2(10)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 11: sgetrf_batched_smallsq_noshfl_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 12: sgetrf_batched_smallsq_noshfl_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 13: sgetrf_batched_smallsq_noshfl_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 14: sgetrf_batched_smallsq_noshfl_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 15: sgetrf_batched_smallsq_noshfl_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 16: sgetrf_batched_smallsq_noshfl_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 17: sgetrf_batched_smallsq_noshfl_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 18: sgetrf_batched_smallsq_noshfl_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 19: sgetrf_batched_smallsq_noshfl_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 20: sgetrf_batched_smallsq_noshfl_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 21: sgetrf_batched_smallsq_noshfl_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 22: sgetrf_batched_smallsq_noshfl_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 23: sgetrf_batched_smallsq_noshfl_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 24: sgetrf_batched_smallsq_noshfl_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 25: sgetrf_batched_smallsq_noshfl_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 26: sgetrf_batched_smallsq_noshfl_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 27: sgetrf_batched_smallsq_noshfl_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 28: sgetrf_batched_smallsq_noshfl_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 29: sgetrf_batched_smallsq_noshfl_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 30: sgetrf_batched_smallsq_noshfl_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 31: sgetrf_batched_smallsq_noshfl_kernel<31, magma_ceilpow2(31)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 32: sgetrf_batched_smallsq_noshfl_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
|
699616c7c8e85a175fe8b039fd55e26041cb3bc9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_resources.hpp>
#include <raft/linalg/rsvd.cuh>
#include <raft/linalg/svd.cuh>
#include <raft/matrix/diagonal.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
template <typename T>
struct randomized_svdInputs {
T tolerance;
int n_row;
int n_col;
int k;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const randomized_svdInputs<T>& dims)
{
return os;
}
template <typename T>
class randomized_svdTest : public ::testing::TestWithParam<randomized_svdInputs<T>> {
public:
randomized_svdTest()
: params(::testing::TestWithParam<randomized_svdInputs<T>>::GetParam()),
stream(handle.get_stream()),
data(params.n_row * params.n_col, stream),
reconst(params.n_row * params.n_col, stream),
left_eig_vectors_act(params.n_row * params.k, stream),
right_eig_vectors_act(params.k * params.n_col, stream),
sing_vals_act(params.k, stream),
left_eig_vectors_ref(params.n_row * params.n_col, stream),
right_eig_vectors_ref(params.n_col * params.n_col, stream),
sing_vals_ref(params.k, stream)
{
}
protected:
void basicTest()
{
int len = params.n_row * params.n_col;
ASSERT(params.n_row == 5 && params.n_col == 5, "This test only supports nrows=5 && ncols=5!");
T data_h[] = {0.76420743, 0.61411544, 0.81724151, 0.42040879, 0.03446089,
0.03697287, 0.85962444, 0.67584086, 0.45594666, 0.02074835,
0.42018265, 0.39204509, 0.12657948, 0.90250559, 0.23076218,
0.50339844, 0.92974961, 0.21213988, 0.63962457, 0.58124562,
0.58325673, 0.11589871, 0.39831112, 0.21492685, 0.00540355};
raft::update_device(data.data(), data_h, len, stream);
T left_eig_vectors_ref_h[] = {0.42823088,
0.59131151,
0.4220887,
0.50441194,
0.18541506,
0.27047497,
-0.17195579,
0.69362791,
-0.43253894,
-0.47860724};
T right_eig_vectors_ref_h[] = {0.53005494,
0.44104121,
0.40720732,
0.54337293,
0.25189773,
0.5789401,
0.15264214,
-0.45215699,
-0.53184873,
0.3927082};
T sing_vals_ref_h[] = {2.36539241, 0.81117785, 0.68562255, 0.41390509, 0.01519322};
raft::update_device(
left_eig_vectors_ref.data(), left_eig_vectors_ref_h, params.n_row * params.k, stream);
raft::update_device(
right_eig_vectors_ref.data(), right_eig_vectors_ref_h, params.k * params.n_col, stream);
raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, params.k, stream);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
left_eig_vectors_act.data(), params.n_row, params.k)),
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
right_eig_vectors_act.data(), params.k, params.n_col)),
2,
2);
handle.sync_stream(stream);
}
void apiTest()
{
int len = params.n_row * params.n_col;
ASSERT(params.n_row == 5 && params.n_col == 5, "This test only supports nrows=5 && ncols=5!");
T data_h[] = {0.76420743, 0.61411544, 0.81724151, 0.42040879, 0.03446089,
0.03697287, 0.85962444, 0.67584086, 0.45594666, 0.02074835,
0.42018265, 0.39204509, 0.12657948, 0.90250559, 0.23076218,
0.50339844, 0.92974961, 0.21213988, 0.63962457, 0.58124562,
0.58325673, 0.11589871, 0.39831112, 0.21492685, 0.00540355};
raft::update_device(data.data(), data_h, len, stream);
T left_eig_vectors_ref_h[] = {0.42823088,
0.59131151,
0.4220887,
0.50441194,
0.18541506,
0.27047497,
-0.17195579,
0.69362791,
-0.43253894,
-0.47860724};
T right_eig_vectors_ref_h[] = {0.53005494,
0.44104121,
0.40720732,
0.54337293,
0.25189773,
0.5789401,
0.15264214,
-0.45215699,
-0.53184873,
0.3927082};
T sing_vals_ref_h[] = {2.36539241, 0.81117785, 0.68562255, 0.41390509, 0.01519322};
raft::update_device(
left_eig_vectors_ref.data(), left_eig_vectors_ref_h, params.n_row * params.k, stream);
raft::update_device(
right_eig_vectors_ref.data(), right_eig_vectors_ref_h, params.k * params.n_col, stream);
raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, params.k, stream);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::nullopt,
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
right_eig_vectors_act.data(), params.k, params.n_col)),
2,
2);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
left_eig_vectors_act.data(), params.n_row, params.k)),
std::nullopt,
2,
2);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::nullopt,
std::nullopt,
2,
2);
handle.sync_stream(stream);
}
void SetUp() override
{
int major = 0;
int minor = 0;
cusolverGetProperty(HIP_LIBRARY_MAJOR_VERSION, &major);
cusolverGetProperty(HIP_LIBRARY_MINOR_VERSION, &minor);
int cusolv_version = major * 1000 + minor * 10;
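    // apiTest(), which exercises the std::nullopt output combinations, is only
    // run when the linked cuSOLVER reports version 11.5 or newer
    // (11050 = major*1000 + minor*10).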
if (cusolv_version >= 11050) apiTest();
basicTest();
}
protected:
raft::device_resources handle;
hipStream_t stream;
randomized_svdInputs<T> params;
rmm::device_uvector<T> data, left_eig_vectors_act, right_eig_vectors_act, sing_vals_act,
left_eig_vectors_ref, right_eig_vectors_ref, sing_vals_ref, reconst;
};
const std::vector<randomized_svdInputs<float>> inputsf1 = {{0.0001f, 5, 5, 2, 1234ULL}};
const std::vector<randomized_svdInputs<double>> inputsd1 = {{0.0001, 5, 5, 2, 1234ULL}};
typedef randomized_svdTest<float> randomized_svdTestF;
TEST_P(randomized_svdTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(),
sing_vals_act.data(),
params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(left_eig_vectors_ref.data(),
left_eig_vectors_act.data(),
params.n_row * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(right_eig_vectors_ref.data(),
right_eig_vectors_act.data(),
params.k * params.n_col,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef randomized_svdTest<double> randomized_svdTestD;
TEST_P(randomized_svdTestD, Result)
{
ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(),
sing_vals_act.data(),
params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(left_eig_vectors_ref.data(),
left_eig_vectors_act.data(),
params.n_row * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(right_eig_vectors_ref.data(),
right_eig_vectors_act.data(),
params.k * params.n_col,
raft::CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(randomized_svdTests1, randomized_svdTestF, ::testing::ValuesIn(inputsf1));
INSTANTIATE_TEST_SUITE_P(randomized_svdTests1, randomized_svdTestD, ::testing::ValuesIn(inputsd1));
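// Note (assumption, not part of the original test file): under the standard
// GoogleTest runner these parameterized suites can be selected with a filter
// such as --gtest_filter='randomized_svdTests1/*'.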
} // end namespace linalg
} // end namespace raft
| 699616c7c8e85a175fe8b039fd55e26041cb3bc9.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_resources.hpp>
#include <raft/linalg/rsvd.cuh>
#include <raft/linalg/svd.cuh>
#include <raft/matrix/diagonal.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
template <typename T>
struct randomized_svdInputs {
T tolerance;
int n_row;
int n_col;
int k;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const randomized_svdInputs<T>& dims)
{
return os;
}
template <typename T>
class randomized_svdTest : public ::testing::TestWithParam<randomized_svdInputs<T>> {
public:
randomized_svdTest()
: params(::testing::TestWithParam<randomized_svdInputs<T>>::GetParam()),
stream(handle.get_stream()),
data(params.n_row * params.n_col, stream),
reconst(params.n_row * params.n_col, stream),
left_eig_vectors_act(params.n_row * params.k, stream),
right_eig_vectors_act(params.k * params.n_col, stream),
sing_vals_act(params.k, stream),
left_eig_vectors_ref(params.n_row * params.n_col, stream),
right_eig_vectors_ref(params.n_col * params.n_col, stream),
sing_vals_ref(params.k, stream)
{
}
protected:
void basicTest()
{
int len = params.n_row * params.n_col;
ASSERT(params.n_row == 5 && params.n_col == 5, "This test only supports nrows=5 && ncols=5!");
T data_h[] = {0.76420743, 0.61411544, 0.81724151, 0.42040879, 0.03446089,
0.03697287, 0.85962444, 0.67584086, 0.45594666, 0.02074835,
0.42018265, 0.39204509, 0.12657948, 0.90250559, 0.23076218,
0.50339844, 0.92974961, 0.21213988, 0.63962457, 0.58124562,
0.58325673, 0.11589871, 0.39831112, 0.21492685, 0.00540355};
raft::update_device(data.data(), data_h, len, stream);
T left_eig_vectors_ref_h[] = {0.42823088,
0.59131151,
0.4220887,
0.50441194,
0.18541506,
0.27047497,
-0.17195579,
0.69362791,
-0.43253894,
-0.47860724};
T right_eig_vectors_ref_h[] = {0.53005494,
0.44104121,
0.40720732,
0.54337293,
0.25189773,
0.5789401,
0.15264214,
-0.45215699,
-0.53184873,
0.3927082};
T sing_vals_ref_h[] = {2.36539241, 0.81117785, 0.68562255, 0.41390509, 0.01519322};
raft::update_device(
left_eig_vectors_ref.data(), left_eig_vectors_ref_h, params.n_row * params.k, stream);
raft::update_device(
right_eig_vectors_ref.data(), right_eig_vectors_ref_h, params.k * params.n_col, stream);
raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, params.k, stream);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
left_eig_vectors_act.data(), params.n_row, params.k)),
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
right_eig_vectors_act.data(), params.k, params.n_col)),
2,
2);
handle.sync_stream(stream);
}
void apiTest()
{
int len = params.n_row * params.n_col;
ASSERT(params.n_row == 5 && params.n_col == 5, "This test only supports nrows=5 && ncols=5!");
T data_h[] = {0.76420743, 0.61411544, 0.81724151, 0.42040879, 0.03446089,
0.03697287, 0.85962444, 0.67584086, 0.45594666, 0.02074835,
0.42018265, 0.39204509, 0.12657948, 0.90250559, 0.23076218,
0.50339844, 0.92974961, 0.21213988, 0.63962457, 0.58124562,
0.58325673, 0.11589871, 0.39831112, 0.21492685, 0.00540355};
raft::update_device(data.data(), data_h, len, stream);
T left_eig_vectors_ref_h[] = {0.42823088,
0.59131151,
0.4220887,
0.50441194,
0.18541506,
0.27047497,
-0.17195579,
0.69362791,
-0.43253894,
-0.47860724};
T right_eig_vectors_ref_h[] = {0.53005494,
0.44104121,
0.40720732,
0.54337293,
0.25189773,
0.5789401,
0.15264214,
-0.45215699,
-0.53184873,
0.3927082};
T sing_vals_ref_h[] = {2.36539241, 0.81117785, 0.68562255, 0.41390509, 0.01519322};
raft::update_device(
left_eig_vectors_ref.data(), left_eig_vectors_ref_h, params.n_row * params.k, stream);
raft::update_device(
right_eig_vectors_ref.data(), right_eig_vectors_ref_h, params.k * params.n_col, stream);
raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, params.k, stream);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::nullopt,
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
right_eig_vectors_act.data(), params.k, params.n_col)),
2,
2);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
left_eig_vectors_act.data(), params.n_row, params.k)),
std::nullopt,
2,
2);
randomized_svd(handle,
raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
data.data(), params.n_row, params.n_col),
raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
std::nullopt,
std::nullopt,
2,
2);
handle.sync_stream(stream);
}
void SetUp() override
{
int major = 0;
int minor = 0;
cusolverGetProperty(MAJOR_VERSION, &major);
cusolverGetProperty(MINOR_VERSION, &minor);
int cusolv_version = major * 1000 + minor * 10;
if (cusolv_version >= 11050) apiTest();
basicTest();
}
protected:
raft::device_resources handle;
cudaStream_t stream;
randomized_svdInputs<T> params;
rmm::device_uvector<T> data, left_eig_vectors_act, right_eig_vectors_act, sing_vals_act,
left_eig_vectors_ref, right_eig_vectors_ref, sing_vals_ref, reconst;
};
const std::vector<randomized_svdInputs<float>> inputsf1 = {{0.0001f, 5, 5, 2, 1234ULL}};
const std::vector<randomized_svdInputs<double>> inputsd1 = {{0.0001, 5, 5, 2, 1234ULL}};
typedef randomized_svdTest<float> randomized_svdTestF;
TEST_P(randomized_svdTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(),
sing_vals_act.data(),
params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(left_eig_vectors_ref.data(),
left_eig_vectors_act.data(),
params.n_row * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(right_eig_vectors_ref.data(),
right_eig_vectors_act.data(),
params.k * params.n_col,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef randomized_svdTest<double> randomized_svdTestD;
TEST_P(randomized_svdTestD, Result)
{
ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(),
sing_vals_act.data(),
params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(left_eig_vectors_ref.data(),
left_eig_vectors_act.data(),
params.n_row * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(right_eig_vectors_ref.data(),
right_eig_vectors_act.data(),
params.k * params.n_col,
raft::CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(randomized_svdTests1, randomized_svdTestF, ::testing::ValuesIn(inputsf1));
INSTANTIATE_TEST_SUITE_P(randomized_svdTests1, randomized_svdTestD, ::testing::ValuesIn(inputsd1));
} // end namespace linalg
} // end namespace raft
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.