hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M) |
---|---|---|---|
5963e4e2d0c02bcb408e394fe6db81d9d90d3ded.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_dynamic_positions.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *arr = NULL;
hipMalloc(&arr, XSIZE*YSIZE);
float t = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
set_dynamic_positions), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,t);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
set_dynamic_positions), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,t);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
set_dynamic_positions), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,t);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5963e4e2d0c02bcb408e394fe6db81d9d90d3ded.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_dynamic_positions.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *arr = NULL;
cudaMalloc(&arr, XSIZE*YSIZE);
float t = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
set_dynamic_positions<<<gridBlock,threadBlock>>>(arr,t);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
set_dynamic_positions<<<gridBlock,threadBlock>>>(arr,t);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
set_dynamic_positions<<<gridBlock,threadBlock>>>(arr,t);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e66766c56a72a262e9dbfce4114b35a23635f1de.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_log10.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
uplo_log10), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
uplo_log10), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
uplo_log10), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e66766c56a72a262e9dbfce4114b35a23635f1de.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_log10.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_log10<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_log10<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_log10<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
366d767d3497c538983bfee836c928f38054fdee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matchValidity_kernel(float *d_flow, float *d_disparity, int n_cols, int n_rows) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
unsigned int ind = x + y * n_cols;
bool valid = (isfinite(d_flow[ind]) && isfinite(d_disparity[ind]));
if (!valid) {
d_flow[ind] = nanf("");
d_flow[ind + n_cols * n_rows] = nanf("");
d_disparity[ind] = nanf("");
}
}
} | 366d767d3497c538983bfee836c928f38054fdee.cu | #include "includes.h"
__global__ void matchValidity_kernel(float *d_flow, float *d_disparity, int n_cols, int n_rows) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
unsigned int ind = x + y * n_cols;
bool valid = (isfinite(d_flow[ind]) && isfinite(d_disparity[ind]));
if (!valid) {
d_flow[ind] = nanf("");
d_flow[ind + n_cols * n_rows] = nanf("");
d_disparity[ind] = nanf("");
}
}
} |
0bf107cab9f91024a61b0b5e13ab6087cb3d07d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "debug_rendering.h"
// Kernel that writes the depth image to the OpenGL PBO directly.
__global__ void sendDepthImageBufferToPBO(float4* PBOpos, glm::vec2 resolution, DPixel* depthBuffer){
int r = (blockIdx.y * blockDim.y) + threadIdx.y;
int c = (blockIdx.x * blockDim.x) + threadIdx.x;
int i = (r * resolution.x) + c;
if(r<resolution.y && c<resolution.x) {
// Cast to float for storage
float depth = depthBuffer[i].depth;
// Each thread writes one pixel location in the texture (textel)
// Store depth in every component except alpha
PBOpos[i].x = depth;
PBOpos[i].y = depth;
PBOpos[i].z = depth;
PBOpos[i].w = 1.0f;
}
}
// Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendColorImageBufferToPBO(float4* PBOpos, glm::vec2 resolution, ColorPixel* colorBuffer){
int r = (blockIdx.y * blockDim.y) + threadIdx.y;
int c = (blockIdx.x * blockDim.x) + threadIdx.x;
int i = (r * resolution.x) + c;
if(r<resolution.y && c<resolution.x){
glm::vec3 color;
color.r = colorBuffer[i].r/255.0f;
color.g = colorBuffer[i].g/255.0f;
color.b = colorBuffer[i].b/255.0f;
// Each thread writes one pixel location in the texture (textel)
PBOpos[i].x = color.r;
PBOpos[i].y = color.g;
PBOpos[i].z = color.b;
PBOpos[i].w = 1.0f;
}
}
// Draws depth image buffer to the texture.
// Texture width and height must match the resolution of the depth image.
// Returns false if width or height does not match, true otherwise
__host__ void drawDepthImageBufferToPBO(float4* dev_PBOpos, DPixel* dev_depthImageBuffer, int texWidth, int texHeight)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(texWidth)/float(tileSize)),
(int)ceil(float(texHeight)/float(tileSize)));
hipLaunchKernelGGL(( sendDepthImageBufferToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_PBOpos, glm::vec2(texWidth, texHeight), dev_depthImageBuffer);
}
// Draws color image buffer to the texture.
// Texture width and height must match the resolution of the color image.
// Returns false if width or height does not match, true otherwise
// dev_PBOpos must be a CUDA device pointer
__host__ void drawColorImageBufferToPBO(float4* dev_PBOpos, ColorPixel* dev_colorImageBuffer, int texWidth, int texHeight)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(texWidth)/float(tileSize)),
(int)ceil(float(texHeight)/float(tileSize)));
hipLaunchKernelGGL(( sendColorImageBufferToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_PBOpos, glm::vec2(texWidth, texHeight), dev_colorImageBuffer);
}
__global__ void sendFloat3SOAToPBO(float4* pbo, float* x_src, float* y_src, float* z_src, float w,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = y_src[i];
pbo[pboi].z = z_src[i];
pbo[pboi].w = w;
}
}
__global__ void sendFloat1SOAToPBO(float4* pbo, float* x_src, float w,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = w;
pbo[pboi].z = w;
pbo[pboi].w = w;
}
}
__global__ void clearPBOKernel(float4* pbo, int xRes, int yRes, float clearValue)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
//
float4 clear;
clear.x = clearValue;
clear.y = clearValue;
clear.z = clearValue;
clear.w = clearValue;
pbo[i] = clear;
}
}
__host__ void clearPBO(float4* pbo, int xRes, int yRes, float clearValue)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
hipLaunchKernelGGL(( clearPBOKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, xRes, yRes, clearValue);
}
__host__ void drawVMaptoPBO(float4* pbo, Float3SOAPyramid vmap, int level, int xRes, int yRes)
{
int tileSize = 16;
if(level < NUM_PYRAMID_LEVELS)
{
int scaledXRes = xRes >> level;
int scaledYRes = yRes >> level;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(scaledXRes)/float(tileSize)),
(int)ceil(float(scaledYRes)/float(tileSize)));
hipLaunchKernelGGL(( sendFloat3SOAToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, vmap.x[level], vmap.y[level], vmap.z[level], 1.0,
scaledXRes, scaledYRes, xRes, yRes);
}
}
__host__ void drawNMaptoPBO(float4* pbo, Float3SOAPyramid nmap, int level, int xRes, int yRes)
{
int tileSize = 16;
if(level < NUM_PYRAMID_LEVELS)
{
int scaledXRes = xRes >> level;
int scaledYRes = yRes >> level;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(scaledXRes)/float(tileSize)),
(int)ceil(float(scaledYRes)/float(tileSize)));
hipLaunchKernelGGL(( sendFloat3SOAToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, nmap.x[level], nmap.y[level], nmap.z[level], 0.0,
scaledXRes, scaledYRes, xRes, yRes);
}
}
__host__ void drawRGBMaptoPBO(float4* pbo, Float3SOAPyramid rgbMap, int level, int xRes, int yRes)
{
int tileSize = 16;
if(level < NUM_PYRAMID_LEVELS)
{
int scaledXRes = xRes >> level;
int scaledYRes = yRes >> level;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(scaledXRes)/float(tileSize)),
(int)ceil(float(scaledYRes)/float(tileSize)));
hipLaunchKernelGGL(( sendFloat3SOAToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, rgbMap.x[level], rgbMap.y[level], rgbMap.z[level], 1.0,
scaledXRes, scaledYRes, xRes, yRes);
}
}
__host__ void drawCurvaturetoPBO(float4* pbo, float* curvature, int xRes, int yRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
hipLaunchKernelGGL(( sendFloat1SOAToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, curvature, 0.0,
xRes, yRes, xRes, yRes);
}
__global__ void sendInt1SOAToPBO(float4* pbo, int* x_src, int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = 0.0;
pbo[pboi].z = 0.0;
pbo[pboi].w = 0.0;
}
}
__host__ void drawNormalVoxelsToPBO(float4* pbo, int* voxels, int pboXRes, int pboYRes, int voxelAzimuthBins, int voxelPolarBins)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(voxelAzimuthBins)/float(tileSize)),
(int)ceil(float(voxelPolarBins)/float(tileSize)));
hipLaunchKernelGGL(( sendInt1SOAToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, voxels, voxelAzimuthBins, voxelPolarBins, pboXRes, pboYRes);
}
__global__ void sendHistogramSOAToPBO(float4* pbo, int* binsX, int* binsY, int* binsZ, int length, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(y < pboYRes && x < length){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = binsX[x];
pbo[pboi].y = binsY[x];
pbo[pboi].z = binsZ[x];
pbo[pboi].w = 0.0;
}
}
__host__ void drawDecoupledHistogramsToPBO(float4* pbo, Int3SOA histograms, int length, int pboXRes, int pboYRes)
{
int tileSize = 16;
assert(length < pboXRes);
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(length)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
hipLaunchKernelGGL(( sendHistogramSOAToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, histograms.x, histograms.y, histograms.z, length, pboXRes, pboYRes);
}
__global__ void sendInt3SOAToPBO(float4* pbo, int* x_src, int* y_src, int* z_src, int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = y_src[i];
pbo[pboi].z = z_src[i];
pbo[pboi].w = 0.0;
}
}
__global__ void sendSegmentDataToPBO(float4* pbo, int* segments, float* projectedDistances, int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = segments[i];
pbo[pboi].y = projectedDistances[i];
pbo[pboi].z = 0.0;
pbo[pboi].w = 0.0;
}
}
__host__ void drawNormalSegmentsToPBO(float4* pbo, int* normalSegments, float* projectedDistanceMap,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
hipLaunchKernelGGL(( sendSegmentDataToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, normalSegments, projectedDistanceMap,
xRes, yRes, pboXRes, pboYRes);
}
__global__ void sendFinalSegmentDataToPBO(float4* pbo, int* segments, float* projectedDistances, float* projectedSx, float* projectedSy,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = segments[i];
pbo[pboi].y = projectedDistances[i];
pbo[pboi].z = projectedSx[i];
pbo[pboi].w = projectedSy[i];
}
}
__host__ void drawSegmentsDataToPBO(float4* pbo, int* normalSegments, float* projectedDistanceMap, float* projectedSx, float* projectedSy,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
hipLaunchKernelGGL(( sendFinalSegmentDataToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, normalSegments, projectedDistanceMap, projectedSx, projectedSy,
xRes, yRes, pboXRes, pboYRes);
}
__global__ void drawScaledHistogramToPBOKernel(float4* pbo, int* hist, glm::vec3 color, float scaleInv, int length, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(y < pboYRes && x < length){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = color.x;
pbo[pboi].y = color.y;
pbo[pboi].z = color.z;
pbo[pboi].w = hist[x] * scaleInv;
}
}
__host__ void drawScaledHistogramToPBO(float4* pbo, int* histogram, glm::vec3 color, int maxValue, int length, int pboXRes, int pboYRes)
{
int tileSize = 16;
assert(length < pboXRes);
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(length)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
hipLaunchKernelGGL(( drawScaledHistogramToPBOKernel), dim3(fullBlocksPerGrid),dim3(threadsPerBlock), 0, 0, pbo, histogram, color, float(1.0f/maxValue), length, pboXRes, pboYRes);
}
__global__ void drawPlaneProjectedTexturetoPBOKernel(float4* pbo, Float4SOA projectedTexture, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(x < pboXRes && y < pboYRes){
//Calculate Scale recip
int scale = 1;
if(texWidth > pboXRes || texHeight > pboYRes)
{
scale = glm::max(ceil(texWidth/float(pboXRes)),ceil(texHeight/float(pboYRes)));
}
x*=scale;
y*=scale;
int i = (y * texBufferWidth) + x;
//TODO: Texture oversize scaling
float4 texVal = {CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F};
if(x < texWidth && y < texHeight)
{
texVal.x = projectedTexture.x[i];
texVal.y = projectedTexture.y[i];
texVal.z = projectedTexture.z[i];
texVal.w = projectedTexture.w[i];
}
// Each thread writes one pixel location in the texture (textel)
pbo[pboi] = texVal;
}
}
__host__ void drawPlaneProjectedTexturetoPBO(float4* pbo, Float4SOA projectedTexture, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(pboXRes)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
//Parallel by PBO pixel (is for debug, not exact recreation)
hipLaunchKernelGGL(( drawPlaneProjectedTexturetoPBOKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, projectedTexture,
texWidth, texHeight, texBufferWidth, pboXRes, pboYRes);
}
__global__ void drawQuadtreetoPBOKernel(float4* pbo, int* quadTree, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(x < pboXRes && y < pboYRes){
//Calculate Scale recip
int scale = 1;
if(texWidth > pboXRes || texHeight > pboYRes)
{
scale = glm::max(ceil(texWidth/float(pboXRes)),ceil(texHeight/float(pboYRes)));
}
x*=scale;
y*=scale;
int i = (y * texBufferWidth) + x;
//TODO: Texture oversize scaling
float4 texVal = {CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F};
if(x < texWidth && y < texHeight)
{
texVal.x = quadTree[i];
}
// Each thread writes one pixel location in the texture (textel)
pbo[pboi] = texVal;
}
}
__host__ void drawQuadtreetoPBO(float4* pbo, int* dev_quadTree, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(pboXRes)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
//Parallel by PBO pixel (is for debug, not exact recreation)
hipLaunchKernelGGL(( drawQuadtreetoPBOKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, pbo, dev_quadTree,
texWidth, texHeight, texBufferWidth, pboXRes, pboYRes);
}
| 0bf107cab9f91024a61b0b5e13ab6087cb3d07d5.cu | #include "debug_rendering.h"
// Kernel that writes the depth image to the OpenGL PBO directly.
__global__ void sendDepthImageBufferToPBO(float4* PBOpos, glm::vec2 resolution, DPixel* depthBuffer){
int r = (blockIdx.y * blockDim.y) + threadIdx.y;
int c = (blockIdx.x * blockDim.x) + threadIdx.x;
int i = (r * resolution.x) + c;
if(r<resolution.y && c<resolution.x) {
// Cast to float for storage
float depth = depthBuffer[i].depth;
// Each thread writes one pixel location in the texture (textel)
// Store depth in every component except alpha
PBOpos[i].x = depth;
PBOpos[i].y = depth;
PBOpos[i].z = depth;
PBOpos[i].w = 1.0f;
}
}
// Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendColorImageBufferToPBO(float4* PBOpos, glm::vec2 resolution, ColorPixel* colorBuffer){
int r = (blockIdx.y * blockDim.y) + threadIdx.y;
int c = (blockIdx.x * blockDim.x) + threadIdx.x;
int i = (r * resolution.x) + c;
if(r<resolution.y && c<resolution.x){
glm::vec3 color;
color.r = colorBuffer[i].r/255.0f;
color.g = colorBuffer[i].g/255.0f;
color.b = colorBuffer[i].b/255.0f;
// Each thread writes one pixel location in the texture (textel)
PBOpos[i].x = color.r;
PBOpos[i].y = color.g;
PBOpos[i].z = color.b;
PBOpos[i].w = 1.0f;
}
}
// Draws depth image buffer to the texture.
// Texture width and height must match the resolution of the depth image.
// Returns false if width or height does not match, true otherwise
__host__ void drawDepthImageBufferToPBO(float4* dev_PBOpos, DPixel* dev_depthImageBuffer, int texWidth, int texHeight)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(texWidth)/float(tileSize)),
(int)ceil(float(texHeight)/float(tileSize)));
sendDepthImageBufferToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(dev_PBOpos, glm::vec2(texWidth, texHeight), dev_depthImageBuffer);
}
// Draws color image buffer to the texture.
// Texture width and height must match the resolution of the color image.
// Returns false if width or height does not match, true otherwise
// dev_PBOpos must be a CUDA device pointer
__host__ void drawColorImageBufferToPBO(float4* dev_PBOpos, ColorPixel* dev_colorImageBuffer, int texWidth, int texHeight)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(texWidth)/float(tileSize)),
(int)ceil(float(texHeight)/float(tileSize)));
sendColorImageBufferToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(dev_PBOpos, glm::vec2(texWidth, texHeight), dev_colorImageBuffer);
}
__global__ void sendFloat3SOAToPBO(float4* pbo, float* x_src, float* y_src, float* z_src, float w,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = y_src[i];
pbo[pboi].z = z_src[i];
pbo[pboi].w = w;
}
}
__global__ void sendFloat1SOAToPBO(float4* pbo, float* x_src, float w,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = w;
pbo[pboi].z = w;
pbo[pboi].w = w;
}
}
__global__ void clearPBOKernel(float4* pbo, int xRes, int yRes, float clearValue)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
//
float4 clear;
clear.x = clearValue;
clear.y = clearValue;
clear.z = clearValue;
clear.w = clearValue;
pbo[i] = clear;
}
}
__host__ void clearPBO(float4* pbo, int xRes, int yRes, float clearValue)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
clearPBOKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, xRes, yRes, clearValue);
}
__host__ void drawVMaptoPBO(float4* pbo, Float3SOAPyramid vmap, int level, int xRes, int yRes)
{
int tileSize = 16;
if(level < NUM_PYRAMID_LEVELS)
{
int scaledXRes = xRes >> level;
int scaledYRes = yRes >> level;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(scaledXRes)/float(tileSize)),
(int)ceil(float(scaledYRes)/float(tileSize)));
sendFloat3SOAToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, vmap.x[level], vmap.y[level], vmap.z[level], 1.0,
scaledXRes, scaledYRes, xRes, yRes);
}
}
__host__ void drawNMaptoPBO(float4* pbo, Float3SOAPyramid nmap, int level, int xRes, int yRes)
{
int tileSize = 16;
if(level < NUM_PYRAMID_LEVELS)
{
int scaledXRes = xRes >> level;
int scaledYRes = yRes >> level;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(scaledXRes)/float(tileSize)),
(int)ceil(float(scaledYRes)/float(tileSize)));
sendFloat3SOAToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, nmap.x[level], nmap.y[level], nmap.z[level], 0.0,
scaledXRes, scaledYRes, xRes, yRes);
}
}
__host__ void drawRGBMaptoPBO(float4* pbo, Float3SOAPyramid rgbMap, int level, int xRes, int yRes)
{
int tileSize = 16;
if(level < NUM_PYRAMID_LEVELS)
{
int scaledXRes = xRes >> level;
int scaledYRes = yRes >> level;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(scaledXRes)/float(tileSize)),
(int)ceil(float(scaledYRes)/float(tileSize)));
sendFloat3SOAToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, rgbMap.x[level], rgbMap.y[level], rgbMap.z[level], 1.0,
scaledXRes, scaledYRes, xRes, yRes);
}
}
__host__ void drawCurvaturetoPBO(float4* pbo, float* curvature, int xRes, int yRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
sendFloat1SOAToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, curvature, 0.0,
xRes, yRes, xRes, yRes);
}
__global__ void sendInt1SOAToPBO(float4* pbo, int* x_src, int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = 0.0;
pbo[pboi].z = 0.0;
pbo[pboi].w = 0.0;
}
}
__host__ void drawNormalVoxelsToPBO(float4* pbo, int* voxels, int pboXRes, int pboYRes, int voxelAzimuthBins, int voxelPolarBins)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(voxelAzimuthBins)/float(tileSize)),
(int)ceil(float(voxelPolarBins)/float(tileSize)));
sendInt1SOAToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, voxels, voxelAzimuthBins, voxelPolarBins, pboXRes, pboYRes);
}
__global__ void sendHistogramSOAToPBO(float4* pbo, int* binsX, int* binsY, int* binsZ, int length, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(y < pboYRes && x < length){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = binsX[x];
pbo[pboi].y = binsY[x];
pbo[pboi].z = binsZ[x];
pbo[pboi].w = 0.0;
}
}
__host__ void drawDecoupledHistogramsToPBO(float4* pbo, Int3SOA histograms, int length, int pboXRes, int pboYRes)
{
int tileSize = 16;
assert(length < pboXRes);
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(length)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
sendHistogramSOAToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, histograms.x, histograms.y, histograms.z, length, pboXRes, pboYRes);
}
__global__ void sendInt3SOAToPBO(float4* pbo, int* x_src, int* y_src, int* z_src, int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = x_src[i];
pbo[pboi].y = y_src[i];
pbo[pboi].z = z_src[i];
pbo[pboi].w = 0.0;
}
}
__global__ void sendSegmentDataToPBO(float4* pbo, int* segments, float* projectedDistances, int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = segments[i];
pbo[pboi].y = projectedDistances[i];
pbo[pboi].z = 0.0;
pbo[pboi].w = 0.0;
}
}
__host__ void drawNormalSegmentsToPBO(float4* pbo, int* normalSegments, float* projectedDistanceMap,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
sendSegmentDataToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, normalSegments, projectedDistanceMap,
xRes, yRes, pboXRes, pboYRes);
}
__global__ void sendFinalSegmentDataToPBO(float4* pbo, int* segments, float* projectedDistances, float* projectedSx, float* projectedSy,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int i = (y * xRes) + x;
int pboi = (y * pboXRes) + x;
if(y < yRes && x < xRes){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = segments[i];
pbo[pboi].y = projectedDistances[i];
pbo[pboi].z = projectedSx[i];
pbo[pboi].w = projectedSy[i];
}
}
__host__ void drawSegmentsDataToPBO(float4* pbo, int* normalSegments, float* projectedDistanceMap, float* projectedSx, float* projectedSy,
int xRes, int yRes, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(xRes)/float(tileSize)),
(int)ceil(float(yRes)/float(tileSize)));
sendFinalSegmentDataToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, normalSegments, projectedDistanceMap, projectedSx, projectedSy,
xRes, yRes, pboXRes, pboYRes);
}
__global__ void drawScaledHistogramToPBOKernel(float4* pbo, int* hist, glm::vec3 color, float scaleInv, int length, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(y < pboYRes && x < length){
// Each thread writes one pixel location in the texture (textel)
pbo[pboi].x = color.x;
pbo[pboi].y = color.y;
pbo[pboi].z = color.z;
pbo[pboi].w = hist[x] * scaleInv;
}
}
__host__ void drawScaledHistogramToPBO(float4* pbo, int* histogram, glm::vec3 color, int maxValue, int length, int pboXRes, int pboYRes)
{
int tileSize = 16;
assert(length < pboXRes);
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(length)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
drawScaledHistogramToPBOKernel<<<fullBlocksPerGrid,threadsPerBlock>>>(pbo, histogram, color, float(1.0f/maxValue), length, pboXRes, pboYRes);
}
__global__ void drawPlaneProjectedTexturetoPBOKernel(float4* pbo, Float4SOA projectedTexture, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(x < pboXRes && y < pboYRes){
//Calculate Scale recip
int scale = 1;
if(texWidth > pboXRes || texHeight > pboYRes)
{
scale = glm::max(ceil(texWidth/float(pboXRes)),ceil(texHeight/float(pboYRes)));
}
x*=scale;
y*=scale;
int i = (y * texBufferWidth) + x;
//TODO: Texture oversize scaling
float4 texVal = {CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F};
if(x < texWidth && y < texHeight)
{
texVal.x = projectedTexture.x[i];
texVal.y = projectedTexture.y[i];
texVal.z = projectedTexture.z[i];
texVal.w = projectedTexture.w[i];
}
// Each thread writes one pixel location in the texture (textel)
pbo[pboi] = texVal;
}
}
__host__ void drawPlaneProjectedTexturetoPBO(float4* pbo, Float4SOA projectedTexture, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(pboXRes)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
//Parallel by PBO pixel (is for debug, not exact recreation)
drawPlaneProjectedTexturetoPBOKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, projectedTexture,
texWidth, texHeight, texBufferWidth, pboXRes, pboYRes);
}
__global__ void drawQuadtreetoPBOKernel(float4* pbo, int* quadTree, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int pboi = (y * pboXRes) + x;
if(x < pboXRes && y < pboYRes){
//Calculate Scale recip
int scale = 1;
if(texWidth > pboXRes || texHeight > pboYRes)
{
scale = glm::max(ceil(texWidth/float(pboXRes)),ceil(texHeight/float(pboYRes)));
}
x*=scale;
y*=scale;
int i = (y * texBufferWidth) + x;
//TODO: Texture oversize scaling
float4 texVal = {CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F};
if(x < texWidth && y < texHeight)
{
texVal.x = quadTree[i];
}
// Each thread writes one pixel location in the texture (textel)
pbo[pboi] = texVal;
}
}
__host__ void drawQuadtreetoPBO(float4* pbo, int* dev_quadTree, int texWidth, int texHeight,
int texBufferWidth, int pboXRes, int pboYRes)
{
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(pboXRes)/float(tileSize)),
(int)ceil(float(pboYRes)/float(tileSize)));
//Parallel by PBO pixel (is for debug, not exact recreation)
drawQuadtreetoPBOKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(pbo, dev_quadTree,
texWidth, texHeight, texBufferWidth, pboXRes, pboYRes);
}
|
6a83fe35eee2eca4b143763679ab633183210fd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "parent.cu"
int main(int argc, char* argv[]) {
float host_a[kDataLen];
float host_b[kDataLen];
float host_c[kDataLen];
for (int i=0; i < kDataLen; i++) {
host_a[i] = i;
host_b[i] = 2*i;
}
// Copy input data to device.
float* device_a;
float* device_b;
float* device_c;
hipMalloc(&device_a, kDataLen * sizeof(float));
hipMalloc(&device_b, kDataLen * sizeof(float));
hipMalloc(&device_c, kDataLen * sizeof(float));
hipMemcpy(device_a, host_a, kDataLen * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(device_b, host_b, kDataLen * sizeof(float),
hipMemcpyHostToDevice);
// Launch the kernel.
hipLaunchKernelGGL(( parent), dim3(5), dim3(kDataLen/5), 0, 0, device_a, device_b, device_c);
// Copy output data to host.
hipDeviceSynchronize();
hipMemcpy(host_c, device_c, kDataLen * sizeof(float),
hipMemcpyDeviceToHost);
// Print the results.
for (int i = 0; i < kDataLen; ++i) {
std::cout << "y[" << i << "] = " << host_c[i] << "\n";
}
hipDeviceReset();
return 0;
}
| 6a83fe35eee2eca4b143763679ab633183210fd5.cu | #include <iostream>
#include "parent.cu"
int main(int argc, char* argv[]) {
float host_a[kDataLen];
float host_b[kDataLen];
float host_c[kDataLen];
for (int i=0; i < kDataLen; i++) {
host_a[i] = i;
host_b[i] = 2*i;
}
// Copy input data to device.
float* device_a;
float* device_b;
float* device_c;
cudaMalloc(&device_a, kDataLen * sizeof(float));
cudaMalloc(&device_b, kDataLen * sizeof(float));
cudaMalloc(&device_c, kDataLen * sizeof(float));
cudaMemcpy(device_a, host_a, kDataLen * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, kDataLen * sizeof(float),
cudaMemcpyHostToDevice);
// Launch the kernel.
parent<<<5, kDataLen/5>>>(device_a, device_b, device_c);
// Copy output data to host.
cudaDeviceSynchronize();
cudaMemcpy(host_c, device_c, kDataLen * sizeof(float),
cudaMemcpyDeviceToHost);
// Print the results.
for (int i = 0; i < kDataLen; ++i) {
std::cout << "y[" << i << "] = " << host_c[i] << "\n";
}
cudaDeviceReset();
return 0;
}
|
2c1452288b2ddb86757d25d3ff13ca13f49110bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define XBLOCK_SIZE 32
#define YBLOCK_SIZE 24
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY,size_t width,int count, int *output) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
float c_re = lowerX + i * stepX;
float c_im = lowerY + j * stepY;
int idx;
float z_re = c_re, z_im = c_im;
for (idx = 0; idx < count; ++idx)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
int* row = (int*)((char*)output + j * width);
row[i] = idx;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
int size = resX * resY * sizeof(int);
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// allocate memory in host & device
int *host_mem, *dev_mem;
size_t pitch;
hipHostMalloc(&host_mem, size, hipHostMallocDefault);
hipMallocPitch(&dev_mem, &pitch, resX * sizeof(int), resY);
// GPU processing
dim3 num_block(resX / XBLOCK_SIZE, resY / YBLOCK_SIZE);
dim3 block_size(XBLOCK_SIZE, YBLOCK_SIZE);
hipLaunchKernelGGL(( mandelKernel), dim3(num_block), dim3(block_size), 0, 0, lowerX, lowerY, stepX, stepY, pitch, maxIterations, dev_mem);
hipDeviceSynchronize();
// GPU translate result data back
hipMemcpy2D(host_mem, size/resY, dev_mem, pitch, resX * sizeof(int), resY, hipMemcpyDeviceToHost);
memcpy(img, host_mem, size);
hipHostFree(host_mem);
hipFree(dev_mem);
}
| 2c1452288b2ddb86757d25d3ff13ca13f49110bc.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define XBLOCK_SIZE 32
#define YBLOCK_SIZE 24
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY,size_t width,int count, int *output) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
float c_re = lowerX + i * stepX;
float c_im = lowerY + j * stepY;
int idx;
float z_re = c_re, z_im = c_im;
for (idx = 0; idx < count; ++idx)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
int* row = (int*)((char*)output + j * width);
row[i] = idx;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
int size = resX * resY * sizeof(int);
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// allocate memory in host & device
int *host_mem, *dev_mem;
size_t pitch;
cudaHostAlloc(&host_mem, size, cudaHostAllocDefault);
cudaMallocPitch(&dev_mem, &pitch, resX * sizeof(int), resY);
// GPU processing
dim3 num_block(resX / XBLOCK_SIZE, resY / YBLOCK_SIZE);
dim3 block_size(XBLOCK_SIZE, YBLOCK_SIZE);
mandelKernel<<<num_block, block_size>>>(lowerX, lowerY, stepX, stepY, pitch, maxIterations, dev_mem);
cudaDeviceSynchronize();
// GPU translate result data back
cudaMemcpy2D(host_mem, size/resY, dev_mem, pitch, resX * sizeof(int), resY, cudaMemcpyDeviceToHost);
memcpy(img, host_mem, size);
cudaFreeHost(host_mem);
cudaFree(dev_mem);
}
|
2f0fdea8a43e254956b9034f87fa617683910b96.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include "common.h"
const char* version_name = "cuSPARSE SpTRSV";\
#define CHECK_CUSPARSE(ret) if(ret != HIPSPARSE_STATUS_SUCCESS) { fprintf(stderr, "error in line %d\n", __LINE__);}
typedef struct {
hipsparseHandle_t handle;
hipsparseMatDescr_t descrA;
csrsv2Info_t info;
void *pBuffer;
} additional_info_t;
typedef additional_info_t *info_ptr_t;
void preprocess(dist_matrix_t *mat) {
info_ptr_t p = (info_ptr_t)malloc(sizeof(additional_info_t));
int pBufferSize;
hipsparseCreate(&p->handle);
hipsparseSetPointerMode(p->handle, HIPSPARSE_POINTER_MODE_HOST);
hipsparseCreateMatDescr(&p->descrA);
hipsparseSetMatFillMode(p->descrA, HIPSPARSE_FILL_MODE_LOWER);
hipsparseSetMatDiagType(p->descrA, HIPSPARSE_DIAG_TYPE_NON_UNIT);
//hipsparseSetMatIndexBase(p->descrA, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseCreateCsrsv2Info(&p->info);
hipsparseDcsrsv2_bufferSize(p->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mat->global_m, \
mat->global_nnz, p->descrA, mat->gpu_values, mat->gpu_r_pos, \
mat->gpu_c_idx, p->info, &pBufferSize);
hipMalloc(&p->pBuffer, pBufferSize);
hipsparseDcsrsv2_analysis(p->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mat->global_m, \
mat->global_nnz, p->descrA, mat->gpu_values, mat->gpu_r_pos, \
mat->gpu_c_idx, p->info, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, p->pBuffer);
mat->additional_info = p;
mat->perm = (int*) malloc(sizeof(int) * mat->global_m);
for(int i = 0; i < mat->global_m; ++i) {
mat->perm[i] = i;
}
}
void destroy_additional_info(void *additional_info) {
info_ptr_t p = (info_ptr_t)additional_info;
hipFree(p->pBuffer);
hipsparseDestroyCsrsv2Info(p->info);
hipsparseDestroyMatDescr(p->descrA);
hipsparseDestroy(p->handle);
free(p);
}
void sptrsv(dist_matrix_t *mat, const data_t* b, data_t* x) {
int m = mat->global_m, nnz = mat->global_nnz;
const data_t alpha = 1.0;
info_ptr_t p = (info_ptr_t)mat->additional_info;
hipsparseDcsrsv2_solve(p->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, &alpha, \
p->descrA, mat->gpu_values, mat->gpu_r_pos, mat->gpu_c_idx, \
p->info, b, x, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, p->pBuffer);
}
| 2f0fdea8a43e254956b9034f87fa617683910b96.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <cuda.h>
#include <cusparse.h>
#include "common.h"
const char* version_name = "cuSPARSE SpTRSV";\
#define CHECK_CUSPARSE(ret) if(ret != CUSPARSE_STATUS_SUCCESS) { fprintf(stderr, "error in line %d\n", __LINE__);}
typedef struct {
cusparseHandle_t handle;
cusparseMatDescr_t descrA;
csrsv2Info_t info;
void *pBuffer;
} additional_info_t;
typedef additional_info_t *info_ptr_t;
void preprocess(dist_matrix_t *mat) {
info_ptr_t p = (info_ptr_t)malloc(sizeof(additional_info_t));
int pBufferSize;
cusparseCreate(&p->handle);
cusparseSetPointerMode(p->handle, CUSPARSE_POINTER_MODE_HOST);
cusparseCreateMatDescr(&p->descrA);
cusparseSetMatFillMode(p->descrA, CUSPARSE_FILL_MODE_LOWER);
cusparseSetMatDiagType(p->descrA, CUSPARSE_DIAG_TYPE_NON_UNIT);
//cusparseSetMatIndexBase(p->descrA, CUSPARSE_INDEX_BASE_ZERO);
cusparseCreateCsrsv2Info(&p->info);
cusparseDcsrsv2_bufferSize(p->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mat->global_m, \
mat->global_nnz, p->descrA, mat->gpu_values, mat->gpu_r_pos, \
mat->gpu_c_idx, p->info, &pBufferSize);
cudaMalloc(&p->pBuffer, pBufferSize);
cusparseDcsrsv2_analysis(p->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mat->global_m, \
mat->global_nnz, p->descrA, mat->gpu_values, mat->gpu_r_pos, \
mat->gpu_c_idx, p->info, CUSPARSE_SOLVE_POLICY_USE_LEVEL, p->pBuffer);
mat->additional_info = p;
mat->perm = (int*) malloc(sizeof(int) * mat->global_m);
for(int i = 0; i < mat->global_m; ++i) {
mat->perm[i] = i;
}
}
void destroy_additional_info(void *additional_info) {
info_ptr_t p = (info_ptr_t)additional_info;
cudaFree(p->pBuffer);
cusparseDestroyCsrsv2Info(p->info);
cusparseDestroyMatDescr(p->descrA);
cusparseDestroy(p->handle);
free(p);
}
void sptrsv(dist_matrix_t *mat, const data_t* b, data_t* x) {
int m = mat->global_m, nnz = mat->global_nnz;
const data_t alpha = 1.0;
info_ptr_t p = (info_ptr_t)mat->additional_info;
cusparseDcsrsv2_solve(p->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, &alpha, \
p->descrA, mat->gpu_values, mat->gpu_r_pos, mat->gpu_c_idx, \
p->info, b, x, CUSPARSE_SOLVE_POLICY_USE_LEVEL, p->pBuffer);
}
|
a49b3920c61f687fa277b5b5b6f673fd0d670505.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "caffe/util/gpu_nms.hpp"
#include "caffe/common.hpp"
#include <vector>
#include <iostream>
namespace caffe {
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
//block_boxes[threadIdx.x * 5 + 4] =
// dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 4) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
if (device_id<=0) return;
int current_device;
CUDA_CHECK(hipGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
void gpu_nms(int* keep_out, int* num_out, const float* boxes_dev, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
// float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
// CUDA_CHECK(hipMalloc(&boxes_dev,
// boxes_num * boxes_dim * sizeof(float)));
// CUDA_CHECK(hipMemcpy(boxes_dev,
// boxes_host,
// boxes_num * boxes_dim * sizeof(float),
// hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
// CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
}
} // namespace caffe
| a49b3920c61f687fa277b5b5b6f673fd0d670505.cu | // ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "caffe/util/gpu_nms.hpp"
#include "caffe/common.hpp"
#include <vector>
#include <iostream>
namespace caffe {
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
//block_boxes[threadIdx.x * 5 + 4] =
// dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 4) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
if (device_id<=0) return;
int current_device;
CUDA_CHECK(cudaGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
void gpu_nms(int* keep_out, int* num_out, const float* boxes_dev, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
// float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
// CUDA_CHECK(cudaMalloc(&boxes_dev,
// boxes_num * boxes_dim * sizeof(float)));
// CUDA_CHECK(cudaMemcpy(boxes_dev,
// boxes_host,
// boxes_num * boxes_dim * sizeof(float),
// cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
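  // Greedy suppression on the host: bit k of mask_host[i * col_blocks + j] is set
  // when box i overlaps box (j * 64 + k) above nms_overlap_thresh. Boxes are
  // expected to arrive sorted by score, so walking them in order and OR-ing each
  // kept box's mask row into remv drops every box suppressed by a kept one.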
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
// CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
} // namespace caffe
|
43dfa92cfd5ae9114e4ba2a81d7c53ae001f8414.hip | // !!! This is a file automatically generated by hipify!!!
// EMD approximation module (based on auction algorithm)
// author: Renwu Li
// torch version: https://github.com/Colin97/MSN-Point-Cloud-Completion/blob/HEAD/emd/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <hip/hip_runtime.h>
__device__ __forceinline__ float atomicMax(float *address, float val)
{
int ret = __float_as_int(*address);
while(val > __int_as_float(ret))
{
int old = ret;
if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
break;
}
return __int_as_float(ret);
}
__global__ void clear(int b, int * cnt_tmp, int * unass_cnt) {
for (int i = threadIdx.x; i < b; i += blockDim.x) {
cnt_tmp[i] = 0;
unass_cnt[i] = 0;
}
}
__global__ void calc_unass_cnt(int b, int n, int * assignment, int * unass_cnt) {
// count the number of unassigned points in each batch
const int BLOCK_SIZE = 1024;
__shared__ int scan_array[BLOCK_SIZE];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
scan_array[threadIdx.x] = assignment[i * n + blockIdx.y * BLOCK_SIZE + threadIdx.x] == -1 ? 1 : 0;
__syncthreads();
int stride = 1;
while(stride <= BLOCK_SIZE / 2) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if(index < BLOCK_SIZE)
scan_array[index] += scan_array[index - stride];
stride = stride * 2;
__syncthreads();
}
__syncthreads();
if (threadIdx.x == BLOCK_SIZE - 1) {
atomicAdd(&unass_cnt[i], scan_array[threadIdx.x]);
}
__syncthreads();
}
}
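// calc_unass_cnt_sum below is a two-phase shared-memory scan: the first while
// loop is an up-sweep that builds partial sums, the second is a down-sweep that
// distributes them, leaving unass_cnt_sum[i] as the inclusive prefix sum of
// unass_cnt[0..i].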
__global__ void calc_unass_cnt_sum(int b, int * unass_cnt, int * unass_cnt_sum) {
	// count the cumulative sum over unass_cnt
const int BLOCK_SIZE = 512; // batch_size <= 512
__shared__ int scan_array[BLOCK_SIZE];
scan_array[threadIdx.x] = unass_cnt[threadIdx.x];
__syncthreads();
int stride = 1;
while(stride <= BLOCK_SIZE / 2) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if(index < BLOCK_SIZE)
scan_array[index] += scan_array[index - stride];
stride = stride * 2;
__syncthreads();
}
__syncthreads();
stride = BLOCK_SIZE / 4;
while(stride > 0) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if((index + stride) < BLOCK_SIZE)
scan_array[index + stride] += scan_array[index];
stride = stride / 2;
__syncthreads();
}
__syncthreads();
unass_cnt_sum[threadIdx.x] = scan_array[threadIdx.x];
}
__global__ void calc_unass_idx(int b, int n, int * assignment, int * unass_idx, int * unass_cnt, int * unass_cnt_sum, int * cnt_tmp) {
// list all the unassigned points
for (int i = blockIdx.x; i < b; i += gridDim.x) {
if (assignment[i * n + blockIdx.y * 1024 + threadIdx.x] == -1) {
int idx = atomicAdd(&cnt_tmp[i], 1);
unass_idx[unass_cnt_sum[i] - unass_cnt[i] + idx] = blockIdx.y * 1024 + threadIdx.x;
}
}
}
__global__ void Bid(int b, int n, const float * xyz1, const float * xyz2, float eps, int * assignment, int * assignment_inv, float * price,
int * bid, float * bid_increments, float * max_increments, int * unass_cnt, int * unass_cnt_sum, int * unass_idx) {
const int batch = 2048, block_size = 1024, block_cnt = n / 1024;
__shared__ float xyz2_buf[batch * 3];
__shared__ float price_buf[batch];
__shared__ float best_buf[block_size];
__shared__ float better_buf[block_size];
__shared__ int best_i_buf[block_size];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int _unass_cnt = unass_cnt[i];
if (_unass_cnt == 0)
continue;
int _unass_cnt_sum = unass_cnt_sum[i];
int unass_per_block = (_unass_cnt + block_cnt - 1) / block_cnt;
int thread_per_unass = block_size / unass_per_block;
int unass_this_block = max(min(_unass_cnt - (int) blockIdx.y * unass_per_block, unass_per_block), 0);
float x1, y1, z1, best = -1e9, better = -1e9;
int best_i = -1, _unass_id = -1, thread_in_unass;
if (threadIdx.x < thread_per_unass * unass_this_block) {
_unass_id = unass_per_block * blockIdx.y + threadIdx.x / thread_per_unass + _unass_cnt_sum - _unass_cnt;
_unass_id = unass_idx[_unass_id];
thread_in_unass = threadIdx.x % thread_per_unass;
x1 = xyz1[(i * n + _unass_id) * 3 + 0];
y1 = xyz1[(i * n + _unass_id) * 3 + 1];
z1 = xyz1[(i * n + _unass_id) * 3 + 2];
}
for (int k2 = 0; k2 < n; k2 += batch) {
int end_k = min(n, k2 + batch) - k2;
for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) {
xyz2_buf[j] = xyz2[(i * n + k2) * 3 + j];
}
for (int j = threadIdx.x; j < end_k; j += blockDim.x) {
price_buf[j] = price[i * n + k2 + j];
}
__syncthreads();
if (_unass_id != -1) {
int delta = (end_k + thread_per_unass - 1) / thread_per_unass;
int l = thread_in_unass * delta;
int r = min((thread_in_unass + 1) * delta, end_k);
for (int k = l; k < r; k++)
//if (!last || assignment_inv[i * n + k + k2] == -1)
{
float x2 = xyz2_buf[k * 3 + 0] - x1;
float y2 = xyz2_buf[k * 3 + 1] - y1;
float z2 = xyz2_buf[k * 3 + 2] - z1;
// the coordinates of points should be normalized to [0, 1]
float d = 3.0 - sqrtf(x2 * x2 + y2 * y2 + z2 * z2) - price_buf[k];
if (d > best) {
better = best;
best = d;
best_i = k + k2;
}
else if (d > better) {
better = d;
}
}
}
__syncthreads();
}
best_buf[threadIdx.x] = best;
better_buf[threadIdx.x] = better;
best_i_buf[threadIdx.x] = best_i;
__syncthreads();
if (_unass_id != -1 && thread_in_unass == 0) {
for (int j = threadIdx.x + 1; j < threadIdx.x + thread_per_unass; j++) {
if (best_buf[j] > best) {
better = max(best, better_buf[j]);
best = best_buf[j];
best_i = best_i_buf[j];
}
else better = max(better, best_buf[j]);
}
bid[i * n + _unass_id] = best_i;
bid_increments[i * n + _unass_id] = best - better + eps;
atomicMax(&max_increments[i * n + best_i], best - better + eps);
}
}
}
__global__ void GetMax(int b, int n, int * assignment, int * bid, float * bid_increments, float * max_increments, int * max_idx) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
if (assignment[i * n + j] == -1) {
int bid_id = bid[i * n + j];
float bid_inc = bid_increments[i * n + j];
float max_inc = max_increments[i * n + bid_id];
if (bid_inc - 1e-6 <= max_inc && max_inc <= bid_inc + 1e-6)
{
max_idx[i * n + bid_id] = j;
}
}
}
}
__global__ void Assign(int b, int n, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * max_idx, bool last) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
if (assignment[i * n + j] == -1) {
int bid_id = bid[i * n + j];
if (last || max_idx[i * n + bid_id] == j)
{
float bid_inc = bid_increments[i * n + j];
int ass_inv = assignment_inv[i * n + bid_id];
if (!last && ass_inv != -1) {
assignment[i * n + ass_inv] = -1;
}
assignment_inv[i * n + bid_id] = j;
assignment[i * n + j] = bid_id;
price[i * n + bid_id] += bid_inc;
max_increments[i * n + bid_id] = -1e9;
}
}
}
}
__global__ void CalcDist(int b, int n, const float * xyz1, const float * xyz2, float * dist, const int * assignment) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
int k = assignment[i * n + j];
float deltax = xyz1[(i * n + j) * 3 + 0] - xyz2[(i * n + k) * 3 + 0];
float deltay = xyz1[(i * n + j) * 3 + 1] - xyz2[(i * n + k) * 3 + 1];
float deltaz = xyz1[(i * n + j) * 3 + 2] - xyz2[(i * n + k) * 3 + 2];
dist[i * n + j] = deltax * deltax + deltay * deltay + deltaz * deltaz;
}
}
int emdMatchLauncher(int b, int n, int m, \
const float* xyz1, \
const float* xyz2, \
int* assignment, \
float* price, \
int* assignment_inv, \
int* bid, \
float* bid_increments, \
float* max_increments, \
int* unass_idx, \
int* unass_cnt, \
int* unass_cnt_sum, \
int* cnt_tmp, \
int* max_idx, \
const float* cu_eps,
const int* cu_iters
) {
if (n != m) {
printf("Input error! The two point clouds should have the same size!\n");
return -1;
}
if (b > 512) {
printf("Input error! The batch size should not exceed 512!\n");
return -1;
}
if (n % 1024 != 0) {
printf("Input error! The size of the input point clouds should be a multiple of 1024.\n");
return -1;
}
hipMemset(assignment, -1, b * n * sizeof(int));
hipMemset(assignment_inv, -1, b * n * sizeof(int));
hipMemset(price, 0, b * m * sizeof(float));
hipMemset(bid, 0, b * n * sizeof(int));
hipMemset(bid_increments, 0, b * n * sizeof(float));
hipMemset(max_increments, 0, b * m * sizeof(float));
hipMemset(unass_idx, 0, b * n * sizeof(int));
hipMemset(max_idx, 0, b * m * sizeof(int));
hipMemset(unass_cnt, 0, 512 * sizeof(int));
hipMemset(unass_cnt_sum, 0, 512 * sizeof(int));
hipMemset(cnt_tmp, 0, 512 * sizeof(int));
	float eps; // cu_eps points to a float, so read it back as one
	int iters;
	hipMemcpy(&eps, cu_eps, sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(&iters, cu_iters, sizeof(int), hipMemcpyDeviceToHost);
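	// Each loop iteration is one auction round: clear/calc_unass_* rebuild the
	// list of still-unassigned points, Bid lets each of them bid on its best
	// target (best minus second-best value plus eps), GetMax resolves competing
	// bids per target, and Assign commits the winners and raises their prices.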
for (int i = 0; i < iters; i++) {
hipLaunchKernelGGL(( clear), dim3(1), dim3(b), 0, 0, b, cnt_tmp, unass_cnt);
hipLaunchKernelGGL(( calc_unass_cnt), dim3(dim3(b, n / 1024, 1)), dim3(1024), 0, 0, b, n, assignment, unass_cnt);
hipLaunchKernelGGL(( calc_unass_cnt_sum), dim3(1), dim3(b), 0, 0, b, unass_cnt, unass_cnt_sum);
hipLaunchKernelGGL(( calc_unass_idx), dim3(dim3(b, n / 1024, 1)), dim3(1024), 0, 0, b, n, assignment, unass_idx, unass_cnt,
unass_cnt_sum, cnt_tmp);
hipLaunchKernelGGL(( Bid), dim3(dim3(b, n / 1024, 1)), dim3(1024), 0, 0, b, n, xyz1, xyz2, eps, assignment, assignment_inv,
price, bid, bid_increments, max_increments,
unass_cnt, unass_cnt_sum, unass_idx);
hipLaunchKernelGGL(( GetMax), dim3(dim3(b, n / 1024, 1)), dim3(1024), 0, 0, b, n, assignment, bid, bid_increments, max_increments, max_idx);
hipLaunchKernelGGL(( Assign), dim3(dim3(b, n / 1024, 1)), dim3(1024), 0, 0, b, n, assignment, assignment_inv, price, bid,
bid_increments, max_increments, max_idx, i == iters - 1);
}
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("Error in nnd Output: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
int emdCostLauncher(int b, int n, const float* xyz1, const float* xyz2, float* dist, const int* assignment) {
hipLaunchKernelGGL(( CalcDist), dim3(dim3(b, n / 1024, 1)), dim3(1024), 0, 0, b, n, xyz1, xyz2, dist, assignment);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("Error in nnd Output: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
__global__ void emdcostGrad(int b, int n, int m, const float* xyz1, const float* xyz2, const float* grad_dist, const int* assignment, float* grad_xyz1, float* grad_xyz2) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) {
float x1 = xyz1[(i * n + j) * 3 + 0];
float y1 = xyz1[(i * n + j) * 3 + 1];
float z1 = xyz1[(i * n + j) * 3 + 2];
int j2 = assignment[i * n + j];
float x2 = xyz2[(i * n + j2) * 3 + 0];
float y2 = xyz2[(i * n + j2) * 3 + 1];
float z2 = xyz2[(i * n + j2) * 3 + 2];
float g = grad_dist[i * n + j] * 2;
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2));
atomicAdd(&(grad_xyz2[(i * n + j2) * 3 + 0]), -g * (x1 - x2));
atomicAdd(&(grad_xyz2[(i * n + j2) * 3 + 1]), -g * (y1 - y2));
atomicAdd(&(grad_xyz2[(i * n + j2) * 3 + 2]), -g * (z1 - z2));
}
}
}
void emdcostGradLauncher(int b, int n, int m, const float* xyz1, const float* xyz2, const float* grad_cost, const int* assignment, float* grad_xyz1, float* grad_xyz2) {
hipMemset(grad_xyz1, 0, b * n * 3 * sizeof(float));
hipMemset(grad_xyz2, 0, b * n * 3 * sizeof(float));
hipLaunchKernelGGL(( emdcostGrad), dim3(dim3(b, n / 1024, 1)), dim3(1024), 0, 0, b, n, m, xyz1, xyz2, grad_cost, assignment, grad_xyz1, grad_xyz2);
}
| 43dfa92cfd5ae9114e4ba2a81d7c53ae001f8414.cu | // EMD approximation module (based on auction algorithm)
// author: Renwu Li
// torch version: https://github.com/Colin97/MSN-Point-Cloud-Completion/blob/HEAD/emd/
#include <stdio.h>
#include <cuda.h>
#include <iostream>
#include <cuda_runtime.h>
__device__ __forceinline__ float atomicMax(float *address, float val)
{
int ret = __float_as_int(*address);
while(val > __int_as_float(ret))
{
int old = ret;
if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
break;
}
return __int_as_float(ret);
}
__global__ void clear(int b, int * cnt_tmp, int * unass_cnt) {
for (int i = threadIdx.x; i < b; i += blockDim.x) {
cnt_tmp[i] = 0;
unass_cnt[i] = 0;
}
}
__global__ void calc_unass_cnt(int b, int n, int * assignment, int * unass_cnt) {
// count the number of unassigned points in each batch
const int BLOCK_SIZE = 1024;
__shared__ int scan_array[BLOCK_SIZE];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
scan_array[threadIdx.x] = assignment[i * n + blockIdx.y * BLOCK_SIZE + threadIdx.x] == -1 ? 1 : 0;
__syncthreads();
int stride = 1;
while(stride <= BLOCK_SIZE / 2) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if(index < BLOCK_SIZE)
scan_array[index] += scan_array[index - stride];
stride = stride * 2;
__syncthreads();
}
__syncthreads();
if (threadIdx.x == BLOCK_SIZE - 1) {
atomicAdd(&unass_cnt[i], scan_array[threadIdx.x]);
}
__syncthreads();
}
}
__global__ void calc_unass_cnt_sum(int b, int * unass_cnt, int * unass_cnt_sum) {
	// count the cumulative sum over unass_cnt
const int BLOCK_SIZE = 512; // batch_size <= 512
__shared__ int scan_array[BLOCK_SIZE];
scan_array[threadIdx.x] = unass_cnt[threadIdx.x];
__syncthreads();
int stride = 1;
while(stride <= BLOCK_SIZE / 2) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if(index < BLOCK_SIZE)
scan_array[index] += scan_array[index - stride];
stride = stride * 2;
__syncthreads();
}
__syncthreads();
stride = BLOCK_SIZE / 4;
while(stride > 0) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if((index + stride) < BLOCK_SIZE)
scan_array[index + stride] += scan_array[index];
stride = stride / 2;
__syncthreads();
}
__syncthreads();
unass_cnt_sum[threadIdx.x] = scan_array[threadIdx.x];
}
__global__ void calc_unass_idx(int b, int n, int * assignment, int * unass_idx, int * unass_cnt, int * unass_cnt_sum, int * cnt_tmp) {
// list all the unassigned points
for (int i = blockIdx.x; i < b; i += gridDim.x) {
if (assignment[i * n + blockIdx.y * 1024 + threadIdx.x] == -1) {
int idx = atomicAdd(&cnt_tmp[i], 1);
unass_idx[unass_cnt_sum[i] - unass_cnt[i] + idx] = blockIdx.y * 1024 + threadIdx.x;
}
}
}
__global__ void Bid(int b, int n, const float * xyz1, const float * xyz2, float eps, int * assignment, int * assignment_inv, float * price,
int * bid, float * bid_increments, float * max_increments, int * unass_cnt, int * unass_cnt_sum, int * unass_idx) {
const int batch = 2048, block_size = 1024, block_cnt = n / 1024;
__shared__ float xyz2_buf[batch * 3];
__shared__ float price_buf[batch];
__shared__ float best_buf[block_size];
__shared__ float better_buf[block_size];
__shared__ int best_i_buf[block_size];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int _unass_cnt = unass_cnt[i];
if (_unass_cnt == 0)
continue;
int _unass_cnt_sum = unass_cnt_sum[i];
int unass_per_block = (_unass_cnt + block_cnt - 1) / block_cnt;
int thread_per_unass = block_size / unass_per_block;
int unass_this_block = max(min(_unass_cnt - (int) blockIdx.y * unass_per_block, unass_per_block), 0);
float x1, y1, z1, best = -1e9, better = -1e9;
int best_i = -1, _unass_id = -1, thread_in_unass;
if (threadIdx.x < thread_per_unass * unass_this_block) {
_unass_id = unass_per_block * blockIdx.y + threadIdx.x / thread_per_unass + _unass_cnt_sum - _unass_cnt;
_unass_id = unass_idx[_unass_id];
thread_in_unass = threadIdx.x % thread_per_unass;
x1 = xyz1[(i * n + _unass_id) * 3 + 0];
y1 = xyz1[(i * n + _unass_id) * 3 + 1];
z1 = xyz1[(i * n + _unass_id) * 3 + 2];
}
for (int k2 = 0; k2 < n; k2 += batch) {
int end_k = min(n, k2 + batch) - k2;
for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) {
xyz2_buf[j] = xyz2[(i * n + k2) * 3 + j];
}
for (int j = threadIdx.x; j < end_k; j += blockDim.x) {
price_buf[j] = price[i * n + k2 + j];
}
__syncthreads();
if (_unass_id != -1) {
int delta = (end_k + thread_per_unass - 1) / thread_per_unass;
int l = thread_in_unass * delta;
int r = min((thread_in_unass + 1) * delta, end_k);
for (int k = l; k < r; k++)
//if (!last || assignment_inv[i * n + k + k2] == -1)
{
float x2 = xyz2_buf[k * 3 + 0] - x1;
float y2 = xyz2_buf[k * 3 + 1] - y1;
float z2 = xyz2_buf[k * 3 + 2] - z1;
// the coordinates of points should be normalized to [0, 1]
float d = 3.0 - sqrtf(x2 * x2 + y2 * y2 + z2 * z2) - price_buf[k];
if (d > best) {
better = best;
best = d;
best_i = k + k2;
}
else if (d > better) {
better = d;
}
}
}
__syncthreads();
}
best_buf[threadIdx.x] = best;
better_buf[threadIdx.x] = better;
best_i_buf[threadIdx.x] = best_i;
__syncthreads();
if (_unass_id != -1 && thread_in_unass == 0) {
for (int j = threadIdx.x + 1; j < threadIdx.x + thread_per_unass; j++) {
if (best_buf[j] > best) {
better = max(best, better_buf[j]);
best = best_buf[j];
best_i = best_i_buf[j];
}
else better = max(better, best_buf[j]);
}
bid[i * n + _unass_id] = best_i;
bid_increments[i * n + _unass_id] = best - better + eps;
atomicMax(&max_increments[i * n + best_i], best - better + eps);
}
}
}
__global__ void GetMax(int b, int n, int * assignment, int * bid, float * bid_increments, float * max_increments, int * max_idx) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
if (assignment[i * n + j] == -1) {
int bid_id = bid[i * n + j];
float bid_inc = bid_increments[i * n + j];
float max_inc = max_increments[i * n + bid_id];
if (bid_inc - 1e-6 <= max_inc && max_inc <= bid_inc + 1e-6)
{
max_idx[i * n + bid_id] = j;
}
}
}
}
__global__ void Assign(int b, int n, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * max_idx, bool last) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
if (assignment[i * n + j] == -1) {
int bid_id = bid[i * n + j];
if (last || max_idx[i * n + bid_id] == j)
{
float bid_inc = bid_increments[i * n + j];
int ass_inv = assignment_inv[i * n + bid_id];
if (!last && ass_inv != -1) {
assignment[i * n + ass_inv] = -1;
}
assignment_inv[i * n + bid_id] = j;
assignment[i * n + j] = bid_id;
price[i * n + bid_id] += bid_inc;
max_increments[i * n + bid_id] = -1e9;
}
}
}
}
__global__ void CalcDist(int b, int n, const float * xyz1, const float * xyz2, float * dist, const int * assignment) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
int k = assignment[i * n + j];
float deltax = xyz1[(i * n + j) * 3 + 0] - xyz2[(i * n + k) * 3 + 0];
float deltay = xyz1[(i * n + j) * 3 + 1] - xyz2[(i * n + k) * 3 + 1];
float deltaz = xyz1[(i * n + j) * 3 + 2] - xyz2[(i * n + k) * 3 + 2];
dist[i * n + j] = deltax * deltax + deltay * deltay + deltaz * deltaz;
}
}
int emdMatchLauncher(int b, int n, int m, \
const float* xyz1, \
const float* xyz2, \
int* assignment, \
float* price, \
int* assignment_inv, \
int* bid, \
float* bid_increments, \
float* max_increments, \
int* unass_idx, \
int* unass_cnt, \
int* unass_cnt_sum, \
int* cnt_tmp, \
int* max_idx, \
const float* cu_eps,
const int* cu_iters
) {
if (n != m) {
printf("Input error! The two point clouds should have the same size!\n");
return -1;
}
if (b > 512) {
printf("Input error! The batch size should not exceed 512!\n");
return -1;
}
if (n % 1024 != 0) {
printf("Input error! The size of the input point clouds should be a multiple of 1024.\n");
return -1;
}
cudaMemset(assignment, -1, b * n * sizeof(int));
cudaMemset(assignment_inv, -1, b * n * sizeof(int));
cudaMemset(price, 0, b * m * sizeof(float));
cudaMemset(bid, 0, b * n * sizeof(int));
cudaMemset(bid_increments, 0, b * n * sizeof(float));
cudaMemset(max_increments, 0, b * m * sizeof(float));
cudaMemset(unass_idx, 0, b * n * sizeof(int));
cudaMemset(max_idx, 0, b * m * sizeof(int));
cudaMemset(unass_cnt, 0, 512 * sizeof(int));
cudaMemset(unass_cnt_sum, 0, 512 * sizeof(int));
cudaMemset(cnt_tmp, 0, 512 * sizeof(int));
	float eps; // cu_eps points to a float, so read it back as one
	int iters;
	cudaMemcpy(&eps, cu_eps, sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(&iters, cu_iters, sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < iters; i++) {
clear<<<1, b>>>(b, cnt_tmp, unass_cnt);
calc_unass_cnt<<<dim3(b, n / 1024, 1), 1024>>>(b, n, assignment, unass_cnt);
calc_unass_cnt_sum<<<1, b>>>(b, unass_cnt, unass_cnt_sum);
calc_unass_idx<<<dim3(b, n / 1024, 1), 1024>>>(b, n, assignment, unass_idx, unass_cnt,
unass_cnt_sum, cnt_tmp);
Bid<<<dim3(b, n / 1024, 1), 1024>>>(b, n, xyz1, xyz2, eps, assignment, assignment_inv,
price, bid, bid_increments, max_increments,
unass_cnt, unass_cnt_sum, unass_idx);
GetMax<<<dim3(b, n / 1024, 1), 1024>>>(b, n, assignment, bid, bid_increments, max_increments, max_idx);
Assign<<<dim3(b, n / 1024, 1), 1024>>>(b, n, assignment, assignment_inv, price, bid,
bid_increments, max_increments, max_idx, i == iters - 1);
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("Error in nnd Output: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
int emdCostLauncher(int b, int n, const float* xyz1, const float* xyz2, float* dist, const int* assignment) {
CalcDist<<<dim3(b, n / 1024, 1), 1024>>>(b, n, xyz1, xyz2, dist, assignment);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("Error in nnd Output: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
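// emdcostGrad below is the chain rule for dist = ||p1 - p2||^2: the partial
// derivative w.r.t. p1 is 2 * (p1 - p2) (negated for p2), scaled by the incoming
// grad_dist; atomicAdd is needed because several source points may be assigned
// to the same target point j2.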
__global__ void emdcostGrad(int b, int n, int m, const float* xyz1, const float* xyz2, const float* grad_dist, const int* assignment, float* grad_xyz1, float* grad_xyz2) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) {
float x1 = xyz1[(i * n + j) * 3 + 0];
float y1 = xyz1[(i * n + j) * 3 + 1];
float z1 = xyz1[(i * n + j) * 3 + 2];
int j2 = assignment[i * n + j];
float x2 = xyz2[(i * n + j2) * 3 + 0];
float y2 = xyz2[(i * n + j2) * 3 + 1];
float z2 = xyz2[(i * n + j2) * 3 + 2];
float g = grad_dist[i * n + j] * 2;
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2));
atomicAdd(&(grad_xyz2[(i * n + j2) * 3 + 0]), -g * (x1 - x2));
atomicAdd(&(grad_xyz2[(i * n + j2) * 3 + 1]), -g * (y1 - y2));
atomicAdd(&(grad_xyz2[(i * n + j2) * 3 + 2]), -g * (z1 - z2));
}
}
}
void emdcostGradLauncher(int b, int n, int m, const float* xyz1, const float* xyz2, const float* grad_cost, const int* assignment, float* grad_xyz1, float* grad_xyz2) {
cudaMemset(grad_xyz1, 0, b * n * 3 * sizeof(float));
cudaMemset(grad_xyz2, 0, b * n * 3 * sizeof(float));
emdcostGrad<<<dim3(b, n / 1024, 1), 1024>>>(b, n, m, xyz1, xyz2, grad_cost, assignment, grad_xyz1, grad_xyz2);
}
|
86cb22a0dd02469c22eefe6cbab9c604283f6255.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void kernel(int *a, const int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
a[i] = a[i] + 3;
}
}
void showArray(int *a, const int N)
{
for (int i = 0; i < N; i++)
{
cout << a[i] << ", ";
}
cout << endl;
}
void run()
{
const int N = 20;
	int *h_a = nullptr;   // host pointer to mapped (zero-copy) memory
	int *d_a = nullptr;   // device-side alias of the same allocation
	int size = N * sizeof(int);
	dim3 blocks(10);
	dim3 threads(2);
	int count = 0;
	hipGetDeviceCount(&count);
	if (count < 1)
	{
		printf("no device found\n");
		return;
	}
	hipDeviceProp_t prop;
	hipGetDeviceProperties(&prop, 0);
	if (!prop.canMapHostMemory)
	{
		printf("cannot use mapped memory\n");
		return;
	}
	hipSetDeviceFlags(hipDeviceMapHost); // allow mapped (zero-copy) host allocations
	hipHostMalloc((void **)&h_a, size, hipHostMallocMapped);
	for (int i = 0; i < N; i++) h_a[i] = 0; // keep the original zero-initialized contents
	hipHostGetDevicePointer((void **)&d_a, h_a, 0);
	hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, d_a, N);
	hipDeviceSynchronize();
	cout << "result: " << endl;
	showArray(h_a, N);
	hipHostFree(h_a); // free only after the host has read the results
	system("pause");
} | 86cb22a0dd02469c22eefe6cbab9c604283f6255.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void kernel(int *a, const int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
a[i] = a[i] + 3;
}
}
void showArray(int *a, const int N)
{
for (int i = 0; i < N; i++)
{
cout << a[i] << ", ";
}
cout << endl;
}
void run()
{
const int N = 20;
	int *h_a = nullptr;   // host pointer to mapped (zero-copy) memory
	int *d_a = nullptr;   // device-side alias of the same allocation
	int size = N * sizeof(int);
	dim3 blocks(10);
	dim3 threads(2);
	int count = 0;
	cudaGetDeviceCount(&count);
	if (count < 1)
	{
		printf("no device found\n");
		return;
	}
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, 0);
	if (!prop.canMapHostMemory)
	{
		printf("cannot use mapped memory\n");
		return;
	}
	cudaSetDeviceFlags(cudaDeviceMapHost); // allow mapped (zero-copy) host allocations
	cudaHostAlloc((void **)&h_a, size, cudaHostAllocMapped);
	for (int i = 0; i < N; i++) h_a[i] = 0; // keep the original zero-initialized contents
	cudaHostGetDevicePointer((void **)&d_a, h_a, 0);
	kernel<<<blocks, threads>>>(d_a, N);
	cudaDeviceSynchronize();
	cout << "result: " << endl;
	showArray(h_a, N);
	cudaFreeHost(h_a); // free only after the host has read the results
	system("pause");
} |
f571e0b1d8ccd1acbb70de551cfdea82b0510be2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
#define BENCH_PRINT
void run(int argc, char** argv);
long long int rows, cols;
int* data;
int** wall;
int* result;
#define M_SEED 9
int pyramid_height;
//#define BENCH_PRINT
void
init(int argc, char** argv)
{
if(argc==4){
cols = atoll(argv[1]);
rows = atoll(argv[2]);
pyramid_height=atoi(argv[3]);
}else{
printf("Usage: dynproc row_len col_len pyramid_height\n");
exit(0);
}
data = new int[rows*cols];
wall = new int*[rows];
for(long long n=0; n<rows; n++)
wall[n]=data+cols*n;
result = new int[cols];
int seed = M_SEED;
srand(seed);
for (long long i = 0; i < rows; i++)
{
for (long long j = 0; j < cols; j++)
{
wall[i][j] = rand() % 10;
}
}
#ifdef BENCH_PRINT
/*
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
printf("%d ",wall[i][j]) ;
}
printf("\n") ;
}
*/
#endif
}
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void dynproc_kernel(
int iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
int cols,
int rows,
int startStep,
int border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
int bx = blockIdx.x;
int tx=threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkX = small_block_cols*bx-border;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
int W = tx-1;
int E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
int left = prev[W];
int up = prev[tx];
int right = prev[E];
int shortest = MIN(left, up);
shortest = MIN(shortest, right);
int index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
/*
compute N time steps
*/
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
int pyramid_height, int blockCols, int borderCols)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(blockCols);
int src = 1, dst = 0;
for (int t = 0; t < rows-1; t+=pyramid_height) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
MIN(pyramid_height, rows-t-1),
gpuWall, gpuResult[src], gpuResult[dst],
cols,rows, t, borderCols);
}
return dst;
}
int main(int argc, char** argv)
{
int num_devices;
hipGetDeviceCount(&num_devices);
if (num_devices > 1) hipSetDevice(DEVICE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
init(argc, argv);
/* --------------- pyramid parameters --------------- */
int borderCols = (pyramid_height)*HALO;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
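	// Example with BLOCK_SIZE = 256 and pyramid_height = 2 (illustrative input):
	// borderCols = 2 and smallBlockCol = 256 - 2*2 = 252, so each block owns 252
	// interior columns and recomputes a 2-column halo on each side while
	// advancing 2 rows per kernel launch.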
	//printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",
	//       pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
int *gpuWall, *gpuResult[2];
long long int size = rows*cols;
hipMalloc((void**)&gpuResult[0], sizeof(int)*cols);
hipMalloc((void**)&gpuResult[1], sizeof(int)*cols);
hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice);
hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), hipMemcpyHostToDevice);
int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
pyramid_height, blockCols, borderCols);
hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost);
#ifdef BENCH_PRINT
/*
for (int i = 0; i < cols; i++)
printf("%d ",data[i]) ;
printf("\n") ;
for (int i = 0; i < cols; i++)
printf("%d ",result[i]) ;
printf("\n") ;
*/
#endif
hipFree(gpuWall);
hipFree(gpuResult[0]);
hipFree(gpuResult[1]);
delete [] data;
delete [] wall;
delete [] result;
}
| f571e0b1d8ccd1acbb70de551cfdea82b0510be2.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
#define BENCH_PRINT
void run(int argc, char** argv);
long long int rows, cols;
int* data;
int** wall;
int* result;
#define M_SEED 9
int pyramid_height;
//#define BENCH_PRINT
void
init(int argc, char** argv)
{
if(argc==4){
cols = atoll(argv[1]);
rows = atoll(argv[2]);
pyramid_height=atoi(argv[3]);
}else{
printf("Usage: dynproc row_len col_len pyramid_height\n");
exit(0);
}
data = new int[rows*cols];
wall = new int*[rows];
for(long long n=0; n<rows; n++)
wall[n]=data+cols*n;
result = new int[cols];
int seed = M_SEED;
srand(seed);
for (long long i = 0; i < rows; i++)
{
for (long long j = 0; j < cols; j++)
{
wall[i][j] = rand() % 10;
}
}
#ifdef BENCH_PRINT
/*
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
printf("%d ",wall[i][j]) ;
}
printf("\n") ;
}
*/
#endif
}
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
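// dynproc_kernel below advances the path-cost dynamic program: each cell of the
// next row costs its own wall value plus the cheapest of its three upper
// neighbours (up-left, up, up-right); the halo columns let a block iterate
// pyramid_height rows per launch without exchanging data with its neighbours.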
__global__ void dynproc_kernel(
int iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
int cols,
int rows,
int startStep,
int border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
int bx = blockIdx.x;
int tx=threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkX = small_block_cols*bx-border;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
int W = tx-1;
int E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
int left = prev[W];
int up = prev[tx];
int right = prev[E];
int shortest = MIN(left, up);
shortest = MIN(shortest, right);
int index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
/*
compute N time steps
*/
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
int pyramid_height, int blockCols, int borderCols)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(blockCols);
int src = 1, dst = 0;
for (int t = 0; t < rows-1; t+=pyramid_height) {
int temp = src;
src = dst;
dst = temp;
dynproc_kernel<<<dimGrid, dimBlock>>>(
MIN(pyramid_height, rows-t-1),
gpuWall, gpuResult[src], gpuResult[dst],
cols,rows, t, borderCols);
}
return dst;
}
int main(int argc, char** argv)
{
int num_devices;
cudaGetDeviceCount(&num_devices);
if (num_devices > 1) cudaSetDevice(DEVICE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
init(argc, argv);
/* --------------- pyramid parameters --------------- */
int borderCols = (pyramid_height)*HALO;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
	//printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",
	//       pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
int *gpuWall, *gpuResult[2];
long long int size = rows*cols;
cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols);
cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols);
cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice);
int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
pyramid_height, blockCols, borderCols);
cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost);
#ifdef BENCH_PRINT
/*
for (int i = 0; i < cols; i++)
printf("%d ",data[i]) ;
printf("\n") ;
for (int i = 0; i < cols; i++)
printf("%d ",result[i]) ;
printf("\n") ;
*/
#endif
cudaFree(gpuWall);
cudaFree(gpuResult[0]);
cudaFree(gpuResult[1]);
delete [] data;
delete [] wall;
delete [] result;
}
|
b27aa0e5bc7dd26ea1b69f25329a7a079b110f90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "../box_iou_polygon_utils.h"
// 2D block with 32 * 16 = 512 threads per block
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 16;
template <typename T>
__global__ void box_iou_polygon_cuda_kernel(
const int n_boxes1,
const int n_boxes2,
const T* dev_boxes1,
const T* dev_boxes2,
T* dev_ious) {
const int row_start = blockIdx.x * blockDim.x;
const int col_start = blockIdx.y * blockDim.y;
const int row_size = min(n_boxes1 - row_start, blockDim.x);
const int col_size = min(n_boxes2 - col_start, blockDim.y);
__shared__ float block_boxes1[BLOCK_DIM_X * 8];
__shared__ float block_boxes2[BLOCK_DIM_Y * 8];
// It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
if (threadIdx.x < row_size && threadIdx.y == 0) {
block_boxes1[threadIdx.x * 8 + 0] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 0];
block_boxes1[threadIdx.x * 8 + 1] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 1];
block_boxes1[threadIdx.x * 8 + 2] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 2];
block_boxes1[threadIdx.x * 8 + 3] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 3];
block_boxes1[threadIdx.x * 8 + 4] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 4];
block_boxes1[threadIdx.x * 8 + 5] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 5];
block_boxes1[threadIdx.x * 8 + 6] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 6];
block_boxes1[threadIdx.x * 8 + 7] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 7];
}
if (threadIdx.x < col_size && threadIdx.y == 0) {
block_boxes2[threadIdx.x * 8 + 0] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 0];
block_boxes2[threadIdx.x * 8 + 1] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 1];
block_boxes2[threadIdx.x * 8 + 2] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 2];
block_boxes2[threadIdx.x * 8 + 3] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 3];
block_boxes2[threadIdx.x * 8 + 4] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 4];
block_boxes2[threadIdx.x * 8 + 5] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 5];
block_boxes2[threadIdx.x * 8 + 6] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 6];
block_boxes2[threadIdx.x * 8 + 7] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 7];
}
__syncthreads();
if (threadIdx.x < row_size && threadIdx.y < col_size) {
int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y;
dev_ious[offset] = single_poly_iou<T>(
block_boxes1 + threadIdx.x * 8, block_boxes2 + threadIdx.y * 8);
}
}
at::Tensor box_iou_polygon_cuda(
const at::Tensor& boxes1,
const at::Tensor& boxes2) {
using scalar_t = float;
AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor");
AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor");
at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes1.device());
int num_boxes1 = boxes1.size(0);
int num_boxes2 = boxes2.size(0);
at::Tensor ious =
at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
if (num_boxes1 > 0 && num_boxes2 > 0) {
const int blocks_x = at::cuda::ATenCeilDiv(num_boxes1, BLOCK_DIM_X);
const int blocks_y = at::cuda::ATenCeilDiv(num_boxes2, BLOCK_DIM_Y);
dim3 blocks(blocks_x, blocks_y);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
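    // One thread per (box1, box2) pair; e.g. with num_boxes1 = 1000 and
    // num_boxes2 = 500 (illustrative sizes) this yields a 32 x 32 grid of
    // 32 x 16 blocks, and out-of-range threads are masked via row_size/col_size.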
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( box_iou_polygon_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
num_boxes1,
num_boxes2,
// boxes1.data_ptr<scalar_t>(),
// boxes2.data_ptr<scalar_t>(),
// (scalar_t*)ious.data_ptr<scalar_t>());
boxes1.data<scalar_t>(),
boxes2.data<scalar_t>(),
(scalar_t*)ious.data<scalar_t>());
AT_CUDA_CHECK(hipGetLastError());
}
auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
return ious.reshape(shape);
}
| b27aa0e5bc7dd26ea1b69f25329a7a079b110f90.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "../box_iou_polygon_utils.h"
// 2D block with 32 * 16 = 512 threads per block
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 16;
template <typename T>
__global__ void box_iou_polygon_cuda_kernel(
const int n_boxes1,
const int n_boxes2,
const T* dev_boxes1,
const T* dev_boxes2,
T* dev_ious) {
const int row_start = blockIdx.x * blockDim.x;
const int col_start = blockIdx.y * blockDim.y;
const int row_size = min(n_boxes1 - row_start, blockDim.x);
const int col_size = min(n_boxes2 - col_start, blockDim.y);
__shared__ float block_boxes1[BLOCK_DIM_X * 8];
__shared__ float block_boxes2[BLOCK_DIM_Y * 8];
// It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
if (threadIdx.x < row_size && threadIdx.y == 0) {
block_boxes1[threadIdx.x * 8 + 0] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 0];
block_boxes1[threadIdx.x * 8 + 1] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 1];
block_boxes1[threadIdx.x * 8 + 2] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 2];
block_boxes1[threadIdx.x * 8 + 3] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 3];
block_boxes1[threadIdx.x * 8 + 4] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 4];
block_boxes1[threadIdx.x * 8 + 5] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 5];
block_boxes1[threadIdx.x * 8 + 6] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 6];
block_boxes1[threadIdx.x * 8 + 7] =
dev_boxes1[(row_start + threadIdx.x) * 8 + 7];
}
if (threadIdx.x < col_size && threadIdx.y == 0) {
block_boxes2[threadIdx.x * 8 + 0] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 0];
block_boxes2[threadIdx.x * 8 + 1] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 1];
block_boxes2[threadIdx.x * 8 + 2] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 2];
block_boxes2[threadIdx.x * 8 + 3] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 3];
block_boxes2[threadIdx.x * 8 + 4] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 4];
block_boxes2[threadIdx.x * 8 + 5] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 5];
block_boxes2[threadIdx.x * 8 + 6] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 6];
block_boxes2[threadIdx.x * 8 + 7] =
dev_boxes2[(col_start + threadIdx.x) * 8 + 7];
}
__syncthreads();
if (threadIdx.x < row_size && threadIdx.y < col_size) {
int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y;
dev_ious[offset] = single_poly_iou<T>(
block_boxes1 + threadIdx.x * 8, block_boxes2 + threadIdx.y * 8);
}
}
at::Tensor box_iou_polygon_cuda(
const at::Tensor& boxes1,
const at::Tensor& boxes2) {
using scalar_t = float;
AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor");
AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor");
at::cuda::CUDAGuard device_guard(boxes1.device());
int num_boxes1 = boxes1.size(0);
int num_boxes2 = boxes2.size(0);
at::Tensor ious =
at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
if (num_boxes1 > 0 && num_boxes2 > 0) {
const int blocks_x = at::cuda::ATenCeilDiv(num_boxes1, BLOCK_DIM_X);
const int blocks_y = at::cuda::ATenCeilDiv(num_boxes2, BLOCK_DIM_Y);
dim3 blocks(blocks_x, blocks_y);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
box_iou_polygon_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
num_boxes1,
num_boxes2,
// boxes1.data_ptr<scalar_t>(),
// boxes2.data_ptr<scalar_t>(),
// (scalar_t*)ious.data_ptr<scalar_t>());
boxes1.data<scalar_t>(),
boxes2.data<scalar_t>(),
(scalar_t*)ious.data<scalar_t>());
AT_CUDA_CHECK(cudaGetLastError());
}
auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
return ious.reshape(shape);
}
|
dd642f160341724abda4996b8798590700e331a1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <traversal/bfs_ref.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <gmock/gmock.h>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph.hpp>
#include <fstream>
#include <queue>
#include <stack>
#include <utility>
#ifndef TEST_EPSILON
#define TEST_EPSILON 0.0001
#endif
// NOTE: Defines under which values the difference should be discarded when
// considering values are close to zero
// i.e: Do we consider that the difference between 1.3e-9 and 8.e-12 is
// significant
#ifndef TEST_ZERO_THRESHOLD
#define TEST_ZERO_THRESHOLD 1e-10
#endif
// ============================================================================
// C++ Reference Implementation
// ============================================================================
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
edge_t get_edge_index_from_source_and_destination(vertex_t source_vertex,
vertex_t destination_vertex,
vertex_t const *indices,
edge_t const *offsets)
{
edge_t index = -1;
edge_t first_edge_idx = offsets[source_vertex];
edge_t last_edge_idx = offsets[source_vertex + 1];
auto index_it = std::find(indices + first_edge_idx, indices + last_edge_idx, destination_vertex);
if (index_it != (indices + last_edge_idx)) { index = std::distance(indices, index_it); }
return index;
}
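// ref_accumulation below is the dependency accumulation step of Brandes'
// algorithm: vertices are popped in order of decreasing BFS distance and, for
// each predecessor v of w, delta(v) += sigma(v) / sigma(w) * (1 + delta(w)),
// with the same contribution credited to the edge (v, w) for edge betweenness.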
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_accumulation(result_t *result,
vertex_t const *indices,
edge_t const *offsets,
vertex_t const number_of_vertices,
std::stack<vertex_t> &S,
std::vector<std::vector<vertex_t>> &pred,
std::vector<double> &sigmas,
std::vector<double> &deltas,
vertex_t source)
{
for (vertex_t v = 0; v < number_of_vertices; ++v) { deltas[v] = 0; }
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
edge_t edge_idx =
get_edge_index_from_source_and_destination<vertex_t, edge_t, weight_t, result_t>(
v, w, indices, offsets);
double coefficient = (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
deltas[v] += coefficient;
result[edge_idx] += coefficient;
}
}
}
// Algorithm 1: Shortest-path vertex betweenness, (Brandes, 2001)
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_edge_betweenness_centrality_impl(vertex_t *indices,
edge_t *offsets,
vertex_t const number_of_vertices,
result_t *result,
vertex_t const *sources,
vertex_t const number_of_sources)
{
std::queue<vertex_t> Q;
std::stack<vertex_t> S;
// NOTE: dist is of type vertex_t not weight_t
std::vector<vertex_t> dist(number_of_vertices);
std::vector<std::vector<vertex_t>> pred(number_of_vertices);
std::vector<double> sigmas(number_of_vertices);
std::vector<double> deltas(number_of_vertices);
std::vector<vertex_t> neighbors;
if (sources) {
for (vertex_t source_idx = 0; source_idx < number_of_sources; ++source_idx) {
vertex_t s = sources[source_idx];
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
} else {
for (vertex_t s = 0; s < number_of_vertices; ++s) {
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_rescale(result_t *result,
bool directed,
bool normalize,
vertex_t const number_of_vertices,
edge_t const number_of_edges)
{
result_t rescale_factor = static_cast<result_t>(1);
result_t casted_number_of_vertices = static_cast<result_t>(number_of_vertices);
if (normalize) {
if (number_of_vertices > 1) {
rescale_factor /= ((casted_number_of_vertices) * (casted_number_of_vertices - 1));
}
} else {
if (!directed) { rescale_factor /= static_cast<result_t>(2); }
}
for (auto idx = 0; idx < number_of_edges; ++idx) { result[idx] *= rescale_factor; }
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_edge_betweenness_centrality(
cugraph::GraphCSRView<vertex_t, edge_t, weight_t> const &graph,
result_t *result,
bool normalize,
vertex_t const number_of_sources,
vertex_t const *sources)
{
vertex_t number_of_vertices = graph.number_of_vertices;
edge_t number_of_edges = graph.number_of_edges;
thrust::host_vector<vertex_t> h_indices(number_of_edges);
thrust::host_vector<edge_t> h_offsets(number_of_vertices + 1);
thrust::device_ptr<vertex_t> d_indices((vertex_t *)&graph.indices[0]);
thrust::device_ptr<edge_t> d_offsets((edge_t *)&graph.offsets[0]);
thrust::copy(d_indices, d_indices + number_of_edges, h_indices.begin());
thrust::copy(d_offsets, d_offsets + (number_of_vertices + 1), h_offsets.begin());
hipDeviceSynchronize();
reference_edge_betweenness_centrality_impl<vertex_t, edge_t, weight_t, result_t>(
&h_indices[0], &h_offsets[0], number_of_vertices, result, sources, number_of_sources);
reference_rescale<vertex_t, edge_t, weight_t, result_t>(
result, graph.prop.directed, normalize, number_of_vertices, number_of_edges);
}
// =============================================================================
// Utility functions
// =============================================================================
// Compare while allowing relative error of epsilon
// zero_threshold indicates when we should drop comparison for small numbers
template <typename T, typename precision_t>
bool compare_close(const T &a, const T &b, const precision_t epsilon, precision_t zero_threshold)
{
  return ((zero_threshold > a && zero_threshold > b)) ||
         ((a >= b * (1.0 - epsilon)) && (a <= b * (1.0 + epsilon)));
}
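// For instance, compare_close(1.00005, 1.0, 1e-4, 1e-10) holds (within 0.01 %),
// compare_close(1.001, 1.0, 1e-4, 1e-10) does not, and two values both below
// zero_threshold compare equal regardless of epsilon.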
// =============================================================================
// Test Suite
// =============================================================================
// Defines Betweenness Centrality UseCase
// SSSP's test suite code uses type of Graph parameter that could be used
// (MTX / RMAT)
typedef struct EdgeBC_Usecase_t {
std::string config_; // Path to graph file
std::string file_path_; // Complete path to graph using dataset_root_dir
int number_of_sources_; // Starting point from the traversal
EdgeBC_Usecase_t(const std::string &config, int number_of_sources)
: config_(config), number_of_sources_(number_of_sources)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// FIXME: Use platform independent stuff from c++14/17 on compiler update
const std::string &rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((config_ != "") && (config_[0] != '/')) {
file_path_ = rapidsDatasetRootDir + "/" + config_;
} else {
file_path_ = config_;
}
};
} EdgeBC_Usecase;
class Tests_EdgeBC : public ::testing::TestWithParam<EdgeBC_Usecase> {
raft::handle_t handle;
public:
Tests_EdgeBC() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// FIXME: Should normalize be part of the configuration instead?
// vertex_t vertex identifier data type
// edge_t edge identifier data type
// weight_t edge weight data type
// result_t result data type
// normalize should the result be normalized
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool normalize>
void run_current_test(const EdgeBC_Usecase &configuration)
{
// Step 1: Construction of the graph based on configuration
bool is_directed = false;
auto csr = cugraph::test::generate_graph_csr_from_mm<vertex_t, edge_t, weight_t>(
is_directed, configuration.file_path_);
hipDeviceSynchronize();
cugraph::GraphCSRView<vertex_t, edge_t, weight_t> G = csr->view();
G.prop.directed = is_directed;
CUDA_TRY(hipGetLastError());
std::vector<result_t> result(G.number_of_edges, 0);
std::vector<result_t> expected(G.number_of_edges, 0);
// Step 2: Generation of sources based on configuration
// if number_of_sources_ is 0 then sources must be nullptr
// Otherwise we only use the first k values
ASSERT_TRUE(configuration.number_of_sources_ >= 0 &&
configuration.number_of_sources_ <= G.number_of_vertices)
      << "Number of sources should be >= 0 and"
      << " at most the number of vertices in the graph";
std::vector<vertex_t> sources(configuration.number_of_sources_);
thrust::sequence(thrust::host, sources.begin(), sources.end(), 0);
vertex_t *sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
reference_edge_betweenness_centrality(
G, expected.data(), normalize, configuration.number_of_sources_, sources_ptr);
sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
rmm::device_vector<result_t> d_result(G.number_of_edges);
cugraph::edge_betweenness_centrality(handle,
G,
d_result.data().get(),
normalize,
static_cast<weight_t *>(nullptr),
configuration.number_of_sources_,
sources_ptr);
CUDA_TRY(hipMemcpy(result.data(),
d_result.data().get(),
sizeof(result_t) * G.number_of_edges,
hipMemcpyDeviceToHost));
for (int i = 0; i < G.number_of_edges; ++i)
EXPECT_TRUE(compare_close(result[i], expected[i], TEST_EPSILON, TEST_ZERO_THRESHOLD))
      << "[MISMATCH] edge id = " << i << ", cugraph = " << result[i]
<< " expected = " << expected[i];
}
};
// ============================================================================
// Tests
// ============================================================================
// Verify Un-Normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NO_NORMALIZE)
{
run_current_test<int, int, float, float, false>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
TEST_P(Tests_EdgeBC, CheckFP64_NO_NORMALIZE)
{
run_current_test<int, int, double, double, false>(GetParam());
}
// Verify Normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NORMALIZE)
{
run_current_test<int, int, float, float, true>(GetParam());
}
#endif
TEST_P(Tests_EdgeBC, CheckFP64_NORMALIZE)
{
run_current_test<int, int, double, double, true>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_EdgeBC,
::testing::Values(EdgeBC_Usecase("test/datasets/karate.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki2003.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki-Talk.mtx", 4)));
#else
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_EdgeBC,
::testing::Values(EdgeBC_Usecase("test/datasets/karate.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 4)));
#endif
CUGRAPH_TEST_PROGRAM_MAIN()
| dd642f160341724abda4996b8798590700e331a1.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <traversal/bfs_ref.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <gmock/gmock.h>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph.hpp>
#include <fstream>
#include <queue>
#include <stack>
#include <utility>
#ifndef TEST_EPSILON
#define TEST_EPSILON 0.0001
#endif
// NOTE: Defines the threshold under which values are considered close to zero,
// so that their difference is discarded,
// i.e. whether the difference between 1.3e-9 and 8.e-12 should be treated as
// significant
#ifndef TEST_ZERO_THRESHOLD
#define TEST_ZERO_THRESHOLD 1e-10
#endif
// ============================================================================
// C++ Reference Implementation
// ============================================================================
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
edge_t get_edge_index_from_source_and_destination(vertex_t source_vertex,
vertex_t destination_vertex,
vertex_t const *indices,
edge_t const *offsets)
{
edge_t index = -1;
edge_t first_edge_idx = offsets[source_vertex];
edge_t last_edge_idx = offsets[source_vertex + 1];
auto index_it = std::find(indices + first_edge_idx, indices + last_edge_idx, destination_vertex);
if (index_it != (indices + last_edge_idx)) { index = std::distance(indices, index_it); }
return index;
}
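// The helper above does a linear scan of source_vertex's CSR row and returns -1
// when (source_vertex, destination_vertex) is not an edge; the accumulation
// below only calls it for (v, w) pairs that lie on an actual shortest path.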
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_accumulation(result_t *result,
vertex_t const *indices,
edge_t const *offsets,
vertex_t const number_of_vertices,
std::stack<vertex_t> &S,
std::vector<std::vector<vertex_t>> &pred,
std::vector<double> &sigmas,
std::vector<double> &deltas,
vertex_t source)
{
for (vertex_t v = 0; v < number_of_vertices; ++v) { deltas[v] = 0; }
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
edge_t edge_idx =
get_edge_index_from_source_and_destination<vertex_t, edge_t, weight_t, result_t>(
v, w, indices, offsets);
double coefficient = (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
deltas[v] += coefficient;
result[edge_idx] += coefficient;
}
}
}
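// The loop above is the dependency-accumulation step of Brandes' algorithm:
// for every w popped in reverse BFS order and every predecessor v of w,
//   delta(v) += (sigma(v) / sigma(w)) * (1 + delta(w))
// and the same contribution is added to the edge (v, w), which is the quantity
// this reference accumulates into `result`.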
// Algorithm 1: Shortest-path vertex betweenness, (Brandes, 2001)
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_edge_betweenness_centrality_impl(vertex_t *indices,
edge_t *offsets,
vertex_t const number_of_vertices,
result_t *result,
vertex_t const *sources,
vertex_t const number_of_sources)
{
std::queue<vertex_t> Q;
std::stack<vertex_t> S;
// NOTE: dist is of type vertex_t not weight_t
std::vector<vertex_t> dist(number_of_vertices);
std::vector<std::vector<vertex_t>> pred(number_of_vertices);
std::vector<double> sigmas(number_of_vertices);
std::vector<double> deltas(number_of_vertices);
std::vector<vertex_t> neighbors;
if (sources) {
for (vertex_t source_idx = 0; source_idx < number_of_sources; ++source_idx) {
vertex_t s = sources[source_idx];
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
} else {
for (vertex_t s = 0; s < number_of_vertices; ++s) {
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_rescale(result_t *result,
bool directed,
bool normalize,
vertex_t const number_of_vertices,
edge_t const number_of_edges)
{
result_t rescale_factor = static_cast<result_t>(1);
result_t casted_number_of_vertices = static_cast<result_t>(number_of_vertices);
if (normalize) {
if (number_of_vertices > 1) {
rescale_factor /= ((casted_number_of_vertices) * (casted_number_of_vertices - 1));
}
} else {
if (!directed) { rescale_factor /= static_cast<result_t>(2); }
}
for (auto idx = 0; idx < number_of_edges; ++idx) { result[idx] *= rescale_factor; }
}
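// Example of the rescale factor computed above: with normalize == true and
// n = 5 vertices the factor is 1 / (5 * 4) = 0.05; with normalize == false the
// factor is 1/2 for undirected graphs and 1 otherwise.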
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_edge_betweenness_centrality(
cugraph::GraphCSRView<vertex_t, edge_t, weight_t> const &graph,
result_t *result,
bool normalize,
vertex_t const number_of_sources,
vertex_t const *sources)
{
vertex_t number_of_vertices = graph.number_of_vertices;
edge_t number_of_edges = graph.number_of_edges;
thrust::host_vector<vertex_t> h_indices(number_of_edges);
thrust::host_vector<edge_t> h_offsets(number_of_vertices + 1);
thrust::device_ptr<vertex_t> d_indices((vertex_t *)&graph.indices[0]);
thrust::device_ptr<edge_t> d_offsets((edge_t *)&graph.offsets[0]);
thrust::copy(d_indices, d_indices + number_of_edges, h_indices.begin());
thrust::copy(d_offsets, d_offsets + (number_of_vertices + 1), h_offsets.begin());
cudaDeviceSynchronize();
reference_edge_betweenness_centrality_impl<vertex_t, edge_t, weight_t, result_t>(
&h_indices[0], &h_offsets[0], number_of_vertices, result, sources, number_of_sources);
reference_rescale<vertex_t, edge_t, weight_t, result_t>(
result, graph.prop.directed, normalize, number_of_vertices, number_of_edges);
}
// =============================================================================
// Utility functions
// =============================================================================
// Compare while allowing a relative error of epsilon
// zero_threshold indicates when we should drop comparison for small numbers
template <typename T, typename precision_t>
bool compare_close(const T &a, const T &b, const precision_t epsilon, precision_t zero_threshold)
{
  return (zero_threshold > a && zero_threshold > b) ||
         ((a >= b * (1.0 - epsilon)) && (a <= b * (1.0 + epsilon)));
}
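// Example: with TEST_EPSILON = 0.0001, compare_close(1.00009, 1.0, ...) passes
// because the relative error is below epsilon, while two values that are both
// under TEST_ZERO_THRESHOLD = 1e-10 are treated as equal regardless of ratio.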
// =============================================================================
// Test Suite
// =============================================================================
// Defines Betweenness Centrality UseCase
// SSSP's test suite code uses type of Graph parameter that could be used
// (MTX / RMAT)
typedef struct EdgeBC_Usecase_t {
std::string config_; // Path to graph file
std::string file_path_; // Complete path to graph using dataset_root_dir
  int number_of_sources_;      // Number of starting points (sources) for the traversal
EdgeBC_Usecase_t(const std::string &config, int number_of_sources)
: config_(config), number_of_sources_(number_of_sources)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// FIXME: Use platform independent stuff from c++14/17 on compiler update
const std::string &rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((config_ != "") && (config_[0] != '/')) {
file_path_ = rapidsDatasetRootDir + "/" + config_;
} else {
file_path_ = config_;
}
};
} EdgeBC_Usecase;
class Tests_EdgeBC : public ::testing::TestWithParam<EdgeBC_Usecase> {
raft::handle_t handle;
public:
Tests_EdgeBC() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// FIXME: Should normalize be part of the configuration instead?
// vertex_t vertex identifier data type
// edge_t edge identifier data type
// weight_t edge weight data type
// result_t result data type
// normalize should the result be normalized
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool normalize>
void run_current_test(const EdgeBC_Usecase &configuration)
{
// Step 1: Construction of the graph based on configuration
bool is_directed = false;
auto csr = cugraph::test::generate_graph_csr_from_mm<vertex_t, edge_t, weight_t>(
is_directed, configuration.file_path_);
cudaDeviceSynchronize();
cugraph::GraphCSRView<vertex_t, edge_t, weight_t> G = csr->view();
G.prop.directed = is_directed;
CUDA_TRY(cudaGetLastError());
std::vector<result_t> result(G.number_of_edges, 0);
std::vector<result_t> expected(G.number_of_edges, 0);
// Step 2: Generation of sources based on configuration
// if number_of_sources_ is 0 then sources must be nullptr
// Otherwise we only use the first k values
ASSERT_TRUE(configuration.number_of_sources_ >= 0 &&
configuration.number_of_sources_ <= G.number_of_vertices)
<< "Number number of sources should be >= 0 and"
<< " less than the number of vertices in the graph";
std::vector<vertex_t> sources(configuration.number_of_sources_);
thrust::sequence(thrust::host, sources.begin(), sources.end(), 0);
vertex_t *sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
reference_edge_betweenness_centrality(
G, expected.data(), normalize, configuration.number_of_sources_, sources_ptr);
sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
rmm::device_vector<result_t> d_result(G.number_of_edges);
cugraph::edge_betweenness_centrality(handle,
G,
d_result.data().get(),
normalize,
static_cast<weight_t *>(nullptr),
configuration.number_of_sources_,
sources_ptr);
CUDA_TRY(cudaMemcpy(result.data(),
d_result.data().get(),
sizeof(result_t) * G.number_of_edges,
cudaMemcpyDeviceToHost));
for (int i = 0; i < G.number_of_edges; ++i)
EXPECT_TRUE(compare_close(result[i], expected[i], TEST_EPSILON, TEST_ZERO_THRESHOLD))
<< "[MISMATCH] vaid = " << i << ", cugraph = " << result[i]
<< " expected = " << expected[i];
}
};
// ============================================================================
// Tests
// ============================================================================
// Verify un-normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NO_NORMALIZE)
{
run_current_test<int, int, float, float, false>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
TEST_P(Tests_EdgeBC, CheckFP64_NO_NORMALIZE)
{
run_current_test<int, int, double, double, false>(GetParam());
}
// Verify normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NORMALIZE)
{
run_current_test<int, int, float, float, true>(GetParam());
}
#endif
TEST_P(Tests_EdgeBC, CheckFP64_NORMALIZE)
{
run_current_test<int, int, double, double, true>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_EdgeBC,
::testing::Values(EdgeBC_Usecase("test/datasets/karate.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki2003.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki-Talk.mtx", 4)));
#else
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_EdgeBC,
::testing::Values(EdgeBC_Usecase("test/datasets/karate.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 4)));
#endif
CUGRAPH_TEST_PROGRAM_MAIN()
|
716d1f09c8fea0063369e6c41cd7cbf38bf6806c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nv_gpu_cache.hpp>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
// Overload CUDA atomic for other 64bit unsigned/signed integer type
__forceinline__
__device__ long atomicAdd(long* address, long val)
{
return (long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ long long atomicAdd(long long* address, long long val)
{
return (long long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ unsigned long atomicAdd(unsigned long* address, unsigned long val)
{
return (unsigned long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
namespace gpu_cache {
#ifdef LIBCUDACXX_VERSION
template<int warp_size>
__forceinline__
__device__ void warp_tile_copy(const size_t lane_idx, const size_t emb_vec_size_in_float, float* d_dst, const float* d_src){
#pragma unroll
for(size_t i = lane_idx; i < emb_vec_size_in_float; i += warp_size){
d_dst[i] = d_src[i];
}
}
#else
template<int warp_size>
__forceinline__
__device__ void warp_tile_copy(const size_t lane_idx, const size_t emb_vec_size_in_float, volatile float* d_dst, volatile float* d_src){
#pragma unroll
for(size_t i = lane_idx; i < emb_vec_size_in_float; i += warp_size){
d_dst[i] = d_src[i];
}
}
#endif
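// warp_tile_copy above is a lane-strided copy: lane i moves elements
// i, i + warp_size, i + 2 * warp_size, ... of the embedding vector, so with
// warp_size = 32 and a 128-float vector each lane copies 4 floats and the
// accesses of a (sub-)warp stay coalesced.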
#ifdef LIBCUDACXX_VERSION
// Will be called by multiple thread_block_tile((sub-)warp) on the same mutex
// Expect only one thread_block_tile at a time to return and execute the critical section
template<typename mutex, int warp_size>
__forceinline__
__device__ void warp_lock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, mutex& set_mutex){
// The first thread of this (sub-)warp to acquire the lock
if(warp_tile.thread_rank() == 0){
set_mutex.acquire();
}
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
}
// The (sub-)warp holding the mutex will unlock the mutex after finishing the critical section on a set
// Expect any following (sub-)warp that acquires the mutex to see the modifications done in the critical section
template<typename mutex, int warp_size>
__forceinline__
__device__ void warp_unlock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, mutex& set_mutex){
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
// The first thread of this (sub-)warp to release the lock
if(warp_tile.thread_rank() == 0){
set_mutex.release();
}
}
#else
// Will be called by multiple thread_block_tile((sub-)warp) on the same mutex
// Expect only one thread_block_tile at a time to return and execute the critical section
template<int warp_size>
__forceinline__
__device__ void warp_lock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, volatile int& set_mutex){
// The first thread of this (sub-)warp to acquire the lock
if(warp_tile.thread_rank() == 0){
while (0 == atomicCAS((int*)&set_mutex, 1, 0))
;
}
__threadfence();
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
}
// The (sub-)warp holding the mutex will unlock the mutex after finishing the critical section on a set
// Expect any following (sub-)warp that acquires the mutex to see the modifications done in the critical section
template<int warp_size>
__forceinline__
__device__ void warp_unlock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, volatile int& set_mutex){
__threadfence();
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
// The first thread of this (sub-)warp to release the lock
if(warp_tile.thread_rank() == 0){
atomicExch((int*)&set_mutex, 1);
}
}
#endif
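// Locking convention for the fallback path above: set_mutex == 1 means unlocked
// and 0 means locked, so atomicCAS(&m, 1, 0) spins until it observes 1 and
// atomicExch(&m, 1) releases; the __threadfence() calls make the critical-section
// writes visible to the next (sub-)warp that acquires the mutex.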
// The (sub-)warp does an all-reduction to find the slot with the min slot_counter
// The slot with the min slot_counter is the least-recently used (LR) slot.
template<typename ref_counter_type, int warp_size>
__forceinline__
__device__ void warp_min_reduction(const cg::thread_block_tile<warp_size>& warp_tile,
ref_counter_type& min_slot_counter_val,
size_t& slab_distance,
size_t& slot_distance){
const size_t lane_idx = warp_tile.thread_rank();
slot_distance = lane_idx;
for(size_t i = (warp_tile.size() >> 1); i > 0 ; i = i >> 1){
ref_counter_type input_slot_counter_val = warp_tile.shfl_xor(min_slot_counter_val, (int)i);
size_t input_slab_distance = warp_tile.shfl_xor(slab_distance, (int)i);
size_t input_slot_distance = warp_tile.shfl_xor(slot_distance, (int)i);
if(input_slot_counter_val == min_slot_counter_val){
if(input_slab_distance == slab_distance){
if(input_slot_distance < slot_distance){
slot_distance = input_slot_distance;
}
}
else if(input_slab_distance < slab_distance){
slab_distance = input_slab_distance;
slot_distance = input_slot_distance;
}
}
else if(input_slot_counter_val < min_slot_counter_val){
min_slot_counter_val = input_slot_counter_val;
slab_distance = input_slab_distance;
slot_distance = input_slot_distance;
}
}
}
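// warp_min_reduction above is a shfl_xor butterfly all-reduction: after
// log2(warp_size) exchange rounds every lane holds the same winner, chosen by
// the smallest slot counter, with ties broken by the smaller slab distance and
// then by the smaller slot (lane) index.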
///////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef LIBCUDACXX_VERSION
// Kernel to initialize the GPU cache
// Init every entry of the cache with <unused_key, value> pair
template<typename slabset, typename ref_counter_type, typename atomic_ref_counter_type, typename key_type, typename mutex>
__global__ void init_cache(slabset* keys,
ref_counter_type* slot_counter,
atomic_ref_counter_type* global_counter,
const size_t num_slot,
const key_type empty_key,
mutex* set_mutex,
const size_t capacity_in_set)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < num_slot )
{
// Set the key of this slot to unused key
// Flatten the cache
key_type * key_slot = (key_type *)keys;
key_slot[idx] = empty_key;
// Clear the counter for this slot
slot_counter[idx] = 0;
}
// First CUDA thread clear the global counter
if( idx == 0 ){
new(global_counter) atomic_ref_counter_type(0);
}
// First capacity_in_set CUDA thread initialize mutex
if( idx < capacity_in_set ){
new(set_mutex + idx) mutex(1);
}
}
template<typename atomic_ref_counter_type, typename mutex>
__global__ void destruct_kernel(atomic_ref_counter_type* global_counter, mutex* set_mutex, const size_t capacity_in_set)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// First CUDA thread destruct the global_counter
if( idx == 0 ){
global_counter -> ~atomic_ref_counter_type();
}
// First capacity_in_set CUDA thread destruct the set mutex
if( idx < capacity_in_set ){
(set_mutex + idx) -> ~mutex();
}
}
#else
// Kernel to initialize the GPU cache
// Init every entry of the cache with <unused_key, value> pair
template<typename slabset, typename ref_counter_type, typename key_type>
__global__ void init_cache(slabset* keys,
ref_counter_type* slot_counter,
ref_counter_type* global_counter,
const size_t num_slot,
const key_type empty_key,
int* set_mutex,
const size_t capacity_in_set)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < num_slot )
{
// Set the key of this slot to unused key
// Flatten the cache
key_type * key_slot = (key_type *)keys;
key_slot[idx] = empty_key;
// Clear the counter for this slot
slot_counter[idx] = 0;
}
// First CUDA thread clear the global counter
if( idx == 0 ){
global_counter[idx] = 0;
}
// First capacity_in_set CUDA thread initialize mutex
if( idx < capacity_in_set ){
set_mutex[idx] = 1;
}
}
#endif
// Kernel to update global counter
// Resolve distance overflow issue as well
#ifdef LIBCUDACXX_VERSION
template<typename atomic_ref_counter_type>
__global__ void update_kernel_overflow_ignore(atomic_ref_counter_type* global_counter,
size_t* d_missing_len){
// Update global counter
global_counter -> fetch_add(1, cuda::std::memory_order_relaxed);
*d_missing_len = 0;
}
#else
template<typename ref_counter_type>
__global__ void update_kernel_overflow_ignore(ref_counter_type* global_counter,
size_t* d_missing_len){
// Update global counter
atomicAdd(global_counter, 1);
*d_missing_len = 0;
}
#endif
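// The kernel above is launched with a single thread per Query() call: it bumps
// the LRU clock (global_counter) once and resets the miss counter that the
// following get_kernel accumulates into.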
#ifdef LIBCUDACXX_VERSION
// Kernel to read from cache
// Also update locality information for touched slot
template<typename key_type,
typename ref_counter_type,
typename atomic_ref_counter_type,
typename slabset,
typename set_hasher,
typename slab_hasher,
typename mutex,
key_type empty_key,
int set_associativity,
int warp_size>
__global__ void get_kernel(const key_type* d_keys,
const size_t len,
float* d_values,
const size_t embedding_vec_size,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
const atomic_ref_counter_type* global_counter,
ref_counter_type* slot_counter,
const size_t capacity_in_set,
const slabset* keys,
const float* vals,
mutex* set_mutex){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// The variable that contains the missing key
key_type missing_key;
// The variable that contains the index for the missing key
uint64_t missing_index;
// The counter for counting the missing key in this warp
uint8_t warp_missing_counter = 0;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
// Lane participate in warp_tile ballot to produce warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
    // Next task in the work queue, start from lower index lane(thread)
int next_lane = __ffs(active_mask) - 1;
    // Broadcast the task and the global index to all lanes in the warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
    // Counter to record how many slabs have been searched
size_t counter = 0;
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched, mark missing task, task is completed
if(counter >= set_associativity){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
key_type read_key = keys[next_set].set_[next_slab].slab_[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
      // If found, mark hit task, copy the found data, the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed);
active = false;
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, d_values + next_idx * embedding_vec_size, vals + found_offset * embedding_vec_size);
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key, if found empty key, mark missing task, task is completed
if(warp_tile.ballot(read_key == empty_key) != 0){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
      // Not found in this slab, the task is not completed, go on to search the next slab
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
}
// After warp_tile complete the working queue, save the result for output
// First thread of the warp_tile accumulate the missing length to global variable
size_t warp_position;
if(lane_idx == 0){
warp_position = atomicAdd(d_missing_len, (size_t)warp_missing_counter);
}
warp_position = warp_tile.shfl(warp_position, 0);
if(lane_idx < warp_missing_counter){
d_missing_keys[warp_position + lane_idx] = missing_key;
d_missing_index[warp_position + lane_idx] = missing_index;
}
}
#else
// Kernel to read from cache
// Also update locality information for touched slot
template<typename key_type,
typename ref_counter_type,
typename slabset,
typename set_hasher,
typename slab_hasher,
key_type empty_key,
int set_associativity,
int warp_size>
__global__ void get_kernel(const key_type* d_keys,
const size_t len,
float* d_values,
const size_t embedding_vec_size,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
ref_counter_type* global_counter,
volatile ref_counter_type* slot_counter,
const size_t capacity_in_set,
volatile slabset* keys,
volatile float* vals,
volatile int* set_mutex){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// The variable that contains the missing key
key_type missing_key;
// The variable that contains the index for the missing key
uint64_t missing_index;
// The counter for counting the missing key in this warp
uint8_t warp_missing_counter = 0;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
// Lane participate in warp_tile ballot to produce warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
    // Next task in the work queue, start from lower index lane(thread)
int next_lane = __ffs(active_mask) - 1;
    // Broadcast the task and the global index to all lanes in the warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
    // Counter to record how many slabs have been searched
size_t counter = 0;
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched, mark missing task, task is completed
if(counter >= set_associativity){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
key_type read_key = ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
      // If found, mark hit task, copy the found data, the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = atomicAdd(global_counter, 0);
active = false;
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(d_values + next_idx * embedding_vec_size), (volatile float*)(vals + found_offset * embedding_vec_size));
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key, if found empty key, mark missing task, task is completed
if(warp_tile.ballot(read_key == empty_key) != 0){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
      // Not found in this slab, the task is not completed, go on to search the next slab
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
}
// After warp_tile complete the working queue, save the result for output
// First thread of the warp_tile accumulate the missing length to global variable
size_t warp_position;
if(lane_idx == 0){
warp_position = atomicAdd(d_missing_len, (size_t)warp_missing_counter);
}
warp_position = warp_tile.shfl(warp_position, 0);
if(lane_idx < warp_missing_counter){
d_missing_keys[warp_position + lane_idx] = missing_key;
d_missing_index[warp_position + lane_idx] = missing_index;
}
}
#endif
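// Both get_kernel variants above share the same warp-cooperative pattern:
// lanes ballot their pending keys into a per-warp work queue, the whole
// (sub-)warp probes the slabs of one key's set at a time under the set mutex,
// and misses are compacted with a single atomicAdd on d_missing_len per warp.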
#ifdef LIBCUDACXX_VERSION
// Kernel to insert or replace the <k,v> pairs into the cache
template<typename key_type,
typename slabset,
typename ref_counter_type,
typename mutex,
typename atomic_ref_counter_type,
typename set_hasher,
typename slab_hasher,
key_type empty_key,
int set_associativity,
int warp_size,
ref_counter_type max_ref_counter_type = std::numeric_limits<ref_counter_type>::max(),
size_t max_slab_distance = std::numeric_limits<size_t>::max()>
__global__ void insert_replace_kernel(const key_type* d_keys,
const float* d_values,
const size_t embedding_vec_size,
const size_t len,
slabset* keys,
float* vals,
ref_counter_type* slot_counter,
mutex* set_mutex,
const atomic_ref_counter_type* global_counter,
const size_t capacity_in_set){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
// Lane participate in warp_tile ballot to produce warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
    // Next task in the work queue, start from lower index lane(thread)
int next_lane = __ffs(active_mask) - 1;
    // Broadcast the task, the global index and the src slabset and slab to all lanes in a warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
size_t first_slab = next_slab;
    // Counter to record how many slabs have been searched
size_t counter = 0;
// Variable to keep the min slot counter during the probing
ref_counter_type min_slot_counter_val = max_ref_counter_type;
// Variable to keep the slab distance for slot with min counter
size_t slab_distance = max_slab_distance;
// Variable to keep the slot distance for slot with min counter within the slab
size_t slot_distance;
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched
// and no empty slots or target slots are found. Replace with LRU
if(counter >= set_associativity){
// (sub)Warp all-reduction, the reduction result store in all threads
warp_min_reduction<ref_counter_type, warp_size>(warp_tile, min_slot_counter_val, slab_distance, slot_distance);
// Calculate the position of LR slot
size_t target_slab = (first_slab + slab_distance) % set_associativity;
size_t slot_index = (next_set * set_associativity + target_slab) * warp_size + slot_distance;
// Replace the LR slot
if(lane_idx == (size_t)next_lane){
keys[next_set].set_[target_slab].slab_[slot_distance] = key;
slot_counter[slot_index] = global_counter -> load(cuda::std::memory_order_relaxed);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, vals + slot_index * embedding_vec_size, d_values + next_idx * embedding_vec_size);
// Replace complete, mark this task completed
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
key_type read_key = keys[next_set].set_[next_slab].slab_[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
// If found target key, the insertion/replace is no longer needed.
// Refresh the slot, the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed);
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key.
      // If found empty key, do insertion, the task is completed
found_lane = __ffs(warp_tile.ballot(read_key == empty_key)) - 1;
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
keys[next_set].set_[next_slab].slab_[found_lane] = key;
slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, vals + found_offset * embedding_vec_size, d_values + next_idx * embedding_vec_size);
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// If no target or unused slot found in this slab,
// Refresh LR info, continue probing
ref_counter_type read_slot_counter = slot_counter[(next_set * set_associativity + next_slab) * warp_size + lane_idx];
if(read_slot_counter < min_slot_counter_val){
min_slot_counter_val = read_slot_counter;
slab_distance = counter;
}
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
}
}
#else
// Kernel to insert or replace the <k,v> pairs into the cache
template<typename key_type,
typename slabset,
typename ref_counter_type,
typename set_hasher,
typename slab_hasher,
key_type empty_key,
int set_associativity,
int warp_size,
ref_counter_type max_ref_counter_type = std::numeric_limits<ref_counter_type>::max(),
size_t max_slab_distance = std::numeric_limits<size_t>::max()>
__global__ void insert_replace_kernel(const key_type* d_keys,
const float* d_values,
const size_t embedding_vec_size,
const size_t len,
volatile slabset* keys,
volatile float* vals,
volatile ref_counter_type* slot_counter,
volatile int* set_mutex,
ref_counter_type* global_counter,
const size_t capacity_in_set){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
// Lane participate in warp_tile ballot to produce warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
    // Next task in the work queue, start from lower index lane(thread)
int next_lane = __ffs(active_mask) - 1;
    // Broadcast the task, the global index and the src slabset and slab to all lanes in a warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
size_t first_slab = next_slab;
    // Counter to record how many slabs have been searched
size_t counter = 0;
// Variable to keep the min slot counter during the probing
ref_counter_type min_slot_counter_val = max_ref_counter_type;
// Variable to keep the slab distance for slot with min counter
size_t slab_distance = max_slab_distance;
// Variable to keep the slot distance for slot with min counter within the slab
size_t slot_distance;
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched
// and no empty slots or target slots are found. Replace with LRU
if(counter >= set_associativity){
// (sub)Warp all-reduction, the reduction result store in all threads
warp_min_reduction<ref_counter_type, warp_size>(warp_tile, min_slot_counter_val, slab_distance, slot_distance);
// Calculate the position of LR slot
size_t target_slab = (first_slab + slab_distance) % set_associativity;
size_t slot_index = (next_set * set_associativity + target_slab) * warp_size + slot_distance;
// Replace the LR slot
if(lane_idx == (size_t)next_lane){
((volatile key_type*)(keys[next_set].set_[target_slab].slab_))[slot_distance] = key;
slot_counter[slot_index] = atomicAdd(global_counter, 0);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(vals + slot_index * embedding_vec_size), (volatile float*)(d_values + next_idx * embedding_vec_size));
// Replace complete, mark this task completed
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
key_type read_key = ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
// If found target key, the insertion/replace is no longer needed.
// Refresh the slot, the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = atomicAdd(global_counter, 0);
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key.
      // If found empty key, do insertion, the task is completed
found_lane = __ffs(warp_tile.ballot(read_key == empty_key)) - 1;
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[found_lane] = key;
slot_counter[found_offset] = atomicAdd(global_counter, 0);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(vals + found_offset * embedding_vec_size), (volatile float*)(d_values + next_idx * embedding_vec_size));
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// If no target or unused slot found in this slab,
// Refresh LR info, continue probing
ref_counter_type read_slot_counter = slot_counter[(next_set * set_associativity + next_slab) * warp_size + lane_idx];
if(read_slot_counter < min_slot_counter_val){
min_slot_counter_val = read_slot_counter;
slab_distance = counter;
}
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
}
}
#endif
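// Both insert_replace_kernel variants above resolve a key in this order:
// refresh the slot if the key is already cached, otherwise claim the first
// empty slot found while probing, otherwise evict the least recently touched
// slot recorded during the probe (the warp_min_reduction winner).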
///////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::gpu_cache(const size_t capacity_in_set,
const size_t embedding_vec_size)
:capacity_in_set_(capacity_in_set),
embedding_vec_size_(embedding_vec_size){
// Check parameter
if(capacity_in_set_ == 0){
printf("Error: Invalid value for capacity_in_set.\n");
return;
}
if(embedding_vec_size_ == 0){
printf("Error: Invalid value for embedding_vec_size.\n");
return;
}
if(set_associativity <= 0){
printf("Error: Invalid value for set_associativity.\n");
return;
}
if(warp_size != 1 && warp_size != 2 && warp_size != 4 && warp_size != 8 && warp_size != 16 && warp_size != 32){
printf("Error: Invalid value for warp_size.\n");
return;
}
// Get the current CUDA dev
CUDA_CHECK(hipGetDevice( &dev_ ));
// Calculate # of slot
num_slot_ = capacity_in_set_ * set_associativity * warp_size;
// Allocate GPU memory for cache
CUDA_CHECK(hipMalloc((void**)&keys_, sizeof(slabset) * capacity_in_set_));
CUDA_CHECK(hipMalloc((void**)&vals_, sizeof(float) * embedding_vec_size_ * num_slot_));
CUDA_CHECK(hipMalloc((void**)&slot_counter_, sizeof(ref_counter_type) * num_slot_));
CUDA_CHECK(hipMalloc((void**)&global_counter_, sizeof(atomic_ref_counter_type)));
// Allocate GPU memory for set mutex
CUDA_CHECK(hipMalloc((void**)&set_mutex_, sizeof(mutex) * capacity_in_set_));
  // Initialize the cache, set all entries to unused <K,V>
hipLaunchKernelGGL(( init_cache), dim3(((num_slot_-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, 0, keys_, slot_counter_, global_counter_, num_slot_, empty_key, set_mutex_, capacity_in_set_);
// Wait for initialization to finish
CUDA_CHECK(hipStreamSynchronize(0));
CUDA_CHECK(hipGetLastError());
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::gpu_cache(const size_t capacity_in_set,
const size_t embedding_vec_size)
:capacity_in_set_(capacity_in_set),
embedding_vec_size_(embedding_vec_size){
// Check parameter
if(capacity_in_set_ == 0){
printf("Error: Invalid value for capacity_in_set.\n");
return;
}
if(embedding_vec_size_ == 0){
printf("Error: Invalid value for embedding_vec_size.\n");
return;
}
if(set_associativity <= 0){
printf("Error: Invalid value for set_associativity.\n");
return;
}
if(warp_size != 1 && warp_size != 2 && warp_size != 4 && warp_size != 8 && warp_size != 16 && warp_size != 32){
printf("Error: Invalid value for warp_size.\n");
return;
}
// Get the current CUDA dev
CUDA_CHECK(hipGetDevice( &dev_ ));
// Calculate # of slot
num_slot_ = capacity_in_set_ * set_associativity * warp_size;
// Allocate GPU memory for cache
CUDA_CHECK(hipMalloc((void**)&keys_, sizeof(slabset) * capacity_in_set_));
CUDA_CHECK(hipMalloc((void**)&vals_, sizeof(float) * embedding_vec_size_ * num_slot_));
CUDA_CHECK(hipMalloc((void**)&slot_counter_, sizeof(ref_counter_type) * num_slot_));
CUDA_CHECK(hipMalloc((void**)&global_counter_, sizeof(ref_counter_type)));
// Allocate GPU memory for set mutex
CUDA_CHECK(hipMalloc((void**)&set_mutex_, sizeof(int) * capacity_in_set_));
  // Initialize the cache, set all entries to unused <K,V>
hipLaunchKernelGGL(( init_cache), dim3(((num_slot_-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, 0, keys_, slot_counter_, global_counter_, num_slot_, empty_key, set_mutex_, capacity_in_set_);
// Wait for initialization to finish
CUDA_CHECK(hipStreamSynchronize(0));
CUDA_CHECK(hipGetLastError());
}
#endif
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::~gpu_cache(){
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set device
CUDA_CHECK(hipSetDevice(dev_));
// Destruct CUDA std object
hipLaunchKernelGGL(( destruct_kernel), dim3(((capacity_in_set_-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, 0, global_counter_, set_mutex_, capacity_in_set_);
// Wait for destruction to finish
CUDA_CHECK(hipStreamSynchronize(0));
// Free GPU memory for cache
CUDA_CHECK(hipFree( keys_ ));
CUDA_CHECK(hipFree( vals_ ));
CUDA_CHECK(hipFree( slot_counter_ ));
CUDA_CHECK(hipFree( global_counter_ ));
// Free GPU memory for set mutex
CUDA_CHECK(hipFree( set_mutex_ ));
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::~gpu_cache() noexcept(false) {
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set device
CUDA_CHECK(hipSetDevice(dev_));
// Free GPU memory for cache
CUDA_CHECK(hipFree( keys_ ));
CUDA_CHECK(hipFree( vals_ ));
CUDA_CHECK(hipFree( slot_counter_ ));
CUDA_CHECK(hipFree( global_counter_ ));
// Free GPU memory for set mutex
CUDA_CHECK(hipFree( set_mutex_ ));
}
#endif
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Query(const key_type* d_keys,
const size_t len,
float* d_values,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
hipStream_t stream){
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(hipSetDevice(dev_));
// Check if it is a valid query
if(len == 0){
// Set the d_missing_len to 0 before return
CUDA_CHECK(hipMemsetAsync(d_missing_len, 0, sizeof(size_t), stream));
return;
}
  // Update the global counter as the user performs a new (most recent) read operation on the cache
// Resolve distance overflow issue as well.
hipLaunchKernelGGL(( update_kernel_overflow_ignore<atomic_ref_counter_type>), dim3(1), dim3(1), 0, stream, global_counter_, d_missing_len);
// Read from the cache
// Touch and refresh the hitting slot
hipLaunchKernelGGL(( get_kernel<key_type, ref_counter_type, atomic_ref_counter_type, slabset, set_hasher, slab_hasher, mutex, empty_key, set_associativity, warp_size>)
, dim3(((len-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, stream, d_keys, len, d_values, embedding_vec_size_, d_missing_index,
d_missing_keys, d_missing_len, global_counter_,
slot_counter_, capacity_in_set_, keys_, vals_, set_mutex_);
// Check for GPU error before return
CUDA_CHECK(hipGetLastError());
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Query(const key_type* d_keys,
const size_t len,
float* d_values,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
hipStream_t stream){
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(hipSetDevice(dev_));
// Check if it is a valid query
if(len == 0){
// Set the d_missing_len to 0 before return
CUDA_CHECK(hipMemsetAsync(d_missing_len, 0, sizeof(size_t), stream));
return;
}
  // Update the global counter as the user performs a new (most recent) read operation on the cache
// Resolve distance overflow issue as well.
hipLaunchKernelGGL(( update_kernel_overflow_ignore<ref_counter_type>), dim3(1), dim3(1), 0, stream, global_counter_, d_missing_len);
// Read from the cache
// Touch and refresh the hitting slot
hipLaunchKernelGGL(( get_kernel<key_type, ref_counter_type, slabset, set_hasher, slab_hasher, empty_key, set_associativity, warp_size>)
, dim3(((len-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, stream, d_keys, len, d_values, embedding_vec_size_, d_missing_index,
d_missing_keys, d_missing_len, global_counter_,
slot_counter_, capacity_in_set_, keys_, vals_, set_mutex_);
// Check for GPU error before return
CUDA_CHECK(hipGetLastError());
}
#endif
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Replace(const key_type* d_keys,
const size_t len,
const float* d_values,
hipStream_t stream){
// Check if it is a valid replacement
if(len == 0){
return;
}
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(hipSetDevice(dev_));
  // Try to insert the <k,v> pairs into the cache as long as there are unused slots
// Then replace the <k,v> pairs into the cache
hipLaunchKernelGGL(( insert_replace_kernel<key_type, slabset, ref_counter_type, mutex, atomic_ref_counter_type, set_hasher, slab_hasher, empty_key, set_associativity, warp_size>)
, dim3(((len-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, stream,
d_keys, d_values, embedding_vec_size_, len, keys_, vals_, slot_counter_, set_mutex_, global_counter_, capacity_in_set_);
// Check for GPU error before return
CUDA_CHECK(hipGetLastError());
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Replace(const key_type* d_keys,
const size_t len,
const float* d_values,
hipStream_t stream){
// Check if it is a valid replacement
if(len == 0){
return;
}
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(hipSetDevice(dev_));
  // Try to insert the <k,v> pairs into the cache as long as there are unused slots
// Then replace the <k,v> pairs into the cache
hipLaunchKernelGGL(( insert_replace_kernel<key_type, slabset, ref_counter_type, set_hasher, slab_hasher, empty_key, set_associativity, warp_size>)
, dim3(((len-1)/BLOCK_SIZE_)+1), dim3(BLOCK_SIZE_), 0, stream,
d_keys, d_values, embedding_vec_size_, len, keys_, vals_, slot_counter_, set_mutex_, global_counter_, capacity_in_set_);
// Check for GPU error before return
CUDA_CHECK(hipGetLastError());
}
#endif
template class gpu_cache<unsigned int, uint64_t, std::numeric_limits<unsigned int>::max(), SET_ASSOCIATIVITY, SLAB_SIZE>;
template class gpu_cache<long long, uint64_t, std::numeric_limits<long long>::max(), SET_ASSOCIATIVITY, SLAB_SIZE>;
} // namespace gpu_cache
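// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source): it shows the
// intended Query -> fetch-missing -> Replace round trip against the cache
// defined above. The function name, capacity, embedding width and buffer
// sizes are assumptions chosen only for this example.
#if 0
void example_query_then_replace(const unsigned int* d_query_keys,
                                const size_t num_query_keys,
                                float* d_query_values,
                                hipStream_t stream) {
  using Cache = gpu_cache::gpu_cache<unsigned int, uint64_t,
                                     std::numeric_limits<unsigned int>::max(),
                                     SET_ASSOCIATIVITY, SLAB_SIZE>;
  // 1024 sets, 64-float embedding vectors (example values only)
  static Cache cache(1024, 64);

  // Device buffers that receive the miss list produced by Query()
  uint64_t* d_missing_index = nullptr;
  unsigned int* d_missing_keys = nullptr;
  size_t* d_missing_len = nullptr;
  hipMalloc(&d_missing_index, sizeof(uint64_t) * num_query_keys);
  hipMalloc(&d_missing_keys, sizeof(unsigned int) * num_query_keys);
  hipMalloc(&d_missing_len, sizeof(size_t));

  // Hits are written straight into d_query_values; misses are reported
  // through the d_missing_* buffers.
  cache.Query(d_query_keys, num_query_keys, d_query_values,
              d_missing_index, d_missing_keys, d_missing_len, stream);

  // The caller would now fetch the missing embeddings from its backing store,
  // scatter them into d_query_values at d_missing_index, and insert them so
  // that later queries hit:
  //   cache.Replace(d_missing_keys, h_missing_len, d_fetched_values, stream);

  hipFree(d_missing_index);
  hipFree(d_missing_keys);
  hipFree(d_missing_len);
}
#endif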
| 716d1f09c8fea0063369e6c41cd7cbf38bf6806c.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nv_gpu_cache.hpp>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
// Overload CUDA atomic for other 64bit unsigned/signed integer type
__forceinline__
__device__ long atomicAdd(long* address, long val)
{
return (long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ long long atomicAdd(long long* address, long long val)
{
return (long long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ unsigned long atomicAdd(unsigned long* address, unsigned long val)
{
return (unsigned long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
namespace gpu_cache {
#ifdef LIBCUDACXX_VERSION
template<int warp_size>
__forceinline__
__device__ void warp_tile_copy(const size_t lane_idx, const size_t emb_vec_size_in_float, float* d_dst, const float* d_src){
#pragma unroll
for(size_t i = lane_idx; i < emb_vec_size_in_float; i += warp_size){
d_dst[i] = d_src[i];
}
}
#else
template<int warp_size>
__forceinline__
__device__ void warp_tile_copy(const size_t lane_idx, const size_t emb_vec_size_in_float, volatile float* d_dst, volatile float* d_src){
#pragma unroll
for(size_t i = lane_idx; i < emb_vec_size_in_float; i += warp_size){
d_dst[i] = d_src[i];
}
}
#endif
#ifdef LIBCUDACXX_VERSION
// Will be called by multiple thread_block_tile((sub-)warp) on the same mutex
// Expect only one thread_block_tile at a time to return and execute the critical section
template<typename mutex, int warp_size>
__forceinline__
__device__ void warp_lock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, mutex& set_mutex){
// The first thread of this (sub-)warp to acquire the lock
if(warp_tile.thread_rank() == 0){
set_mutex.acquire();
}
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
}
// The (sub-)warp holding the mutex will unlock the mutex after finishing the critical section on a set
// Expect any following (sub-)warp that acquires the mutex to see the modifications done in the critical section
template<typename mutex, int warp_size>
__forceinline__
__device__ void warp_unlock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, mutex& set_mutex){
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
// The first thread of this (sub-)warp to release the lock
if(warp_tile.thread_rank() == 0){
set_mutex.release();
}
}
#else
// Will be called by multiple thread_block_tile((sub-)warp) on the same mutex
// Expect only one thread_block_tile at a time to return and execute the critical section
template<int warp_size>
__forceinline__
__device__ void warp_lock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, volatile int& set_mutex){
// The first thread of this (sub-)warp to acquire the lock
if(warp_tile.thread_rank() == 0){
while (0 == atomicCAS((int*)&set_mutex, 1, 0))
;
}
__threadfence();
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
}
// The (sub-)warp holding the mutex will unlock the mutex after finishing the critical section on a set
// Expect any following (sub-)warp that acquires the mutex to see the modifications done in the critical section
template<int warp_size>
__forceinline__
__device__ void warp_unlock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, volatile int& set_mutex){
__threadfence();
warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence
// The first thread of this (sub-)warp to release the lock
if(warp_tile.thread_rank() == 0){
atomicExch((int*)&set_mutex, 1);
}
}
#endif
// The (sub-)warp does an all-reduction to find the slot with the min slot_counter
// The slot with the min slot_counter is the least-recently used (LR) slot.
template<typename ref_counter_type, int warp_size>
__forceinline__
__device__ void warp_min_reduction(const cg::thread_block_tile<warp_size>& warp_tile,
ref_counter_type& min_slot_counter_val,
size_t& slab_distance,
size_t& slot_distance){
const size_t lane_idx = warp_tile.thread_rank();
slot_distance = lane_idx;
for(size_t i = (warp_tile.size() >> 1); i > 0 ; i = i >> 1){
ref_counter_type input_slot_counter_val = warp_tile.shfl_xor(min_slot_counter_val, (int)i);
size_t input_slab_distance = warp_tile.shfl_xor(slab_distance, (int)i);
size_t input_slot_distance = warp_tile.shfl_xor(slot_distance, (int)i);
if(input_slot_counter_val == min_slot_counter_val){
if(input_slab_distance == slab_distance){
if(input_slot_distance < slot_distance){
slot_distance = input_slot_distance;
}
}
else if(input_slab_distance < slab_distance){
slab_distance = input_slab_distance;
slot_distance = input_slot_distance;
}
}
else if(input_slot_counter_val < min_slot_counter_val){
min_slot_counter_val = input_slot_counter_val;
slab_distance = input_slab_distance;
slot_distance = input_slot_distance;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef LIBCUDACXX_VERSION
// Kernel to initialize the GPU cache
// Init every entry of the cache with <unused_key, value> pair
template<typename slabset, typename ref_counter_type, typename atomic_ref_counter_type, typename key_type, typename mutex>
__global__ void init_cache(slabset* keys,
ref_counter_type* slot_counter,
atomic_ref_counter_type* global_counter,
const size_t num_slot,
const key_type empty_key,
mutex* set_mutex,
const size_t capacity_in_set)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < num_slot )
{
// Set the key of this slot to unused key
// Flatten the cache
key_type * key_slot = (key_type *)keys;
key_slot[idx] = empty_key;
// Clear the counter for this slot
slot_counter[idx] = 0;
}
    // The first CUDA thread clears the global counter
if( idx == 0 ){
new(global_counter) atomic_ref_counter_type(0);
}
    // The first capacity_in_set CUDA threads initialize the set mutexes
if( idx < capacity_in_set ){
new(set_mutex + idx) mutex(1);
}
}
template<typename atomic_ref_counter_type, typename mutex>
__global__ void destruct_kernel(atomic_ref_counter_type* global_counter, mutex* set_mutex, const size_t capacity_in_set)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    // The first CUDA thread destructs the global_counter
if( idx == 0 ){
global_counter -> ~atomic_ref_counter_type();
}
    // The first capacity_in_set CUDA threads destruct the set mutexes
if( idx < capacity_in_set ){
(set_mutex + idx) -> ~mutex();
}
}
#else
// Kernel to initialize the GPU cache
// Init every entry of the cache with <unused_key, value> pair
template<typename slabset, typename ref_counter_type, typename key_type>
__global__ void init_cache(slabset* keys,
ref_counter_type* slot_counter,
ref_counter_type* global_counter,
const size_t num_slot,
const key_type empty_key,
int* set_mutex,
const size_t capacity_in_set)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < num_slot )
{
// Set the key of this slot to unused key
// Flatten the cache
key_type * key_slot = (key_type *)keys;
key_slot[idx] = empty_key;
// Clear the counter for this slot
slot_counter[idx] = 0;
}
    // The first CUDA thread clears the global counter
if( idx == 0 ){
global_counter[idx] = 0;
}
    // The first capacity_in_set CUDA threads initialize the set mutexes
if( idx < capacity_in_set ){
set_mutex[idx] = 1;
}
}
#endif
// Kernel to update global counter
// Resolve distance overflow issue as well
#ifdef LIBCUDACXX_VERSION
template<typename atomic_ref_counter_type>
__global__ void update_kernel_overflow_ignore(atomic_ref_counter_type* global_counter,
size_t* d_missing_len){
// Update global counter
global_counter -> fetch_add(1, cuda::std::memory_order_relaxed);
*d_missing_len = 0;
}
#else
template<typename ref_counter_type>
__global__ void update_kernel_overflow_ignore(ref_counter_type* global_counter,
size_t* d_missing_len){
// Update global counter
atomicAdd(global_counter, 1);
*d_missing_len = 0;
}
#endif
#ifdef LIBCUDACXX_VERSION
// Kernel to read from cache
// Also update locality information for touched slot
template<typename key_type,
typename ref_counter_type,
typename atomic_ref_counter_type,
typename slabset,
typename set_hasher,
typename slab_hasher,
typename mutex,
key_type empty_key,
int set_associativity,
int warp_size>
__global__ void get_kernel(const key_type* d_keys,
const size_t len,
float* d_values,
const size_t embedding_vec_size,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
const atomic_ref_counter_type* global_counter,
ref_counter_type* slot_counter,
const size_t capacity_in_set,
const slabset* keys,
const float* vals,
mutex* set_mutex){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// The variable that contains the missing key
key_type missing_key;
// The variable that contains the index for the missing key
uint64_t missing_index;
// The counter for counting the missing key in this warp
uint8_t warp_missing_counter = 0;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
    // Each lane participates in the warp_tile ballot to produce the warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
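    // Each set bit in active_mask marks a lane with a pending key; the (sub-)warp drains this
    // work queue one task at a time, with all lanes cooperating on the task owned by the lane
    // selected via __ffs below.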
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
        // Next task in the work queue, starting from the lowest-index lane (thread)
int next_lane = __ffs(active_mask) - 1;
// Broadcast the task and the global index to all lane in the warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
// Counter to record how many slab have been searched
size_t counter = 0;
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched, mark missing task, task is completed
if(counter >= set_associativity){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
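            // (each lane reads one slot, so the whole slab of warp_size slots is examined at once)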
key_type read_key = keys[next_set].set_[next_slab].slab_[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
            // If found, mark the task as a hit, copy the found data, and the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed);
active = false;
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, d_values + next_idx * embedding_vec_size, vals + found_offset * embedding_vec_size);
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key, if found empty key, mark missing task, task is completed
if(warp_tile.ballot(read_key == empty_key) != 0){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
            // Not found in this slab, the task is not completed, go on to search the next slab
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
}
    // After the warp_tile completes the work queue, save the results for output
    // The first thread of the warp_tile accumulates the missing length into the global variable
size_t warp_position;
if(lane_idx == 0){
warp_position = atomicAdd(d_missing_len, (size_t)warp_missing_counter);
}
warp_position = warp_tile.shfl(warp_position, 0);
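    // warp_position now holds the warp's reserved offset in the missing-key output buffers;
    // each lane with a missing key writes its entry to a unique position below.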
if(lane_idx < warp_missing_counter){
d_missing_keys[warp_position + lane_idx] = missing_key;
d_missing_index[warp_position + lane_idx] = missing_index;
}
}
#else
// Kernel to read from cache
// Also update locality information for touched slot
template<typename key_type,
typename ref_counter_type,
typename slabset,
typename set_hasher,
typename slab_hasher,
key_type empty_key,
int set_associativity,
int warp_size>
__global__ void get_kernel(const key_type* d_keys,
const size_t len,
float* d_values,
const size_t embedding_vec_size,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
ref_counter_type* global_counter,
volatile ref_counter_type* slot_counter,
const size_t capacity_in_set,
volatile slabset* keys,
volatile float* vals,
volatile int* set_mutex){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// The variable that contains the missing key
key_type missing_key;
// The variable that contains the index for the missing key
uint64_t missing_index;
// The counter for counting the missing key in this warp
uint8_t warp_missing_counter = 0;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
    // Each lane participates in the warp_tile ballot to produce the warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
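    // Each set bit in active_mask marks a lane with a pending key; the (sub-)warp drains this
    // work queue one task at a time, with all lanes cooperating on the task owned by the lane
    // selected via __ffs below.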
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
        // Next task in the work queue, starting from the lowest-index lane (thread)
int next_lane = __ffs(active_mask) - 1;
// Broadcast the task and the global index to all lane in the warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
// Counter to record how many slab have been searched
size_t counter = 0;
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched, mark missing task, task is completed
if(counter >= set_associativity){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
key_type read_key = ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
            // If found, mark the task as a hit, copy the found data, and the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = atomicAdd(global_counter, 0);
active = false;
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(d_values + next_idx * embedding_vec_size), (volatile float*)(vals + found_offset * embedding_vec_size));
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key, if found empty key, mark missing task, task is completed
if(warp_tile.ballot(read_key == empty_key) != 0){
if(lane_idx == warp_missing_counter){
missing_key = next_key;
missing_index = next_idx;
}
if(lane_idx == (size_t)next_lane){
active = false;
}
warp_missing_counter++;
active_mask = warp_tile.ballot(active);
break;
}
            // Not found in this slab, the task is not completed, go on to search the next slab
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
}
    // After the warp_tile completes the work queue, save the results for output
    // The first thread of the warp_tile accumulates the missing length into the global variable
size_t warp_position;
if(lane_idx == 0){
warp_position = atomicAdd(d_missing_len, (size_t)warp_missing_counter);
}
warp_position = warp_tile.shfl(warp_position, 0);
if(lane_idx < warp_missing_counter){
d_missing_keys[warp_position + lane_idx] = missing_key;
d_missing_index[warp_position + lane_idx] = missing_index;
}
}
#endif
#ifdef LIBCUDACXX_VERSION
// Kernel to insert or replace the <k,v> pairs into the cache
template<typename key_type,
typename slabset,
typename ref_counter_type,
typename mutex,
typename atomic_ref_counter_type,
typename set_hasher,
typename slab_hasher,
key_type empty_key,
int set_associativity,
int warp_size,
ref_counter_type max_ref_counter_type = std::numeric_limits<ref_counter_type>::max(),
size_t max_slab_distance = std::numeric_limits<size_t>::max()>
__global__ void insert_replace_kernel(const key_type* d_keys,
const float* d_values,
const size_t embedding_vec_size,
const size_t len,
slabset* keys,
float* vals,
ref_counter_type* slot_counter,
mutex* set_mutex,
const atomic_ref_counter_type* global_counter,
const size_t capacity_in_set){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
    // Each lane participates in the warp_tile ballot to produce the warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
        // Next task in the work queue, starting from the lowest-index lane (thread)
int next_lane = __ffs(active_mask) - 1;
// Broadcast the task, the global index and the src slabset and slab to all lane in a warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
size_t first_slab = next_slab;
// Counter to record how many slab have been searched
size_t counter = 0;
// Variable to keep the min slot counter during the probing
ref_counter_type min_slot_counter_val = max_ref_counter_type;
// Variable to keep the slab distance for slot with min counter
size_t slab_distance = max_slab_distance;
// Variable to keep the slot distance for slot with min counter within the slab
size_t slot_distance;
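        // min_slot_counter_val / slab_distance / slot_distance track the least-recently-used slot
        // observed while probing; they are only consulted when probing finds neither the key nor an empty slot.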
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched
            // and no empty slot or target slot was found, replace the least-recently-used (LRU) slot
if(counter >= set_associativity){
                // (sub-)warp all-reduction; the reduction result is stored in all threads
warp_min_reduction<ref_counter_type, warp_size>(warp_tile, min_slot_counter_val, slab_distance, slot_distance);
// Calculate the position of LR slot
size_t target_slab = (first_slab + slab_distance) % set_associativity;
size_t slot_index = (next_set * set_associativity + target_slab) * warp_size + slot_distance;
// Replace the LR slot
if(lane_idx == (size_t)next_lane){
keys[next_set].set_[target_slab].slab_[slot_distance] = key;
slot_counter[slot_index] = global_counter -> load(cuda::std::memory_order_relaxed);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, vals + slot_index * embedding_vec_size, d_values + next_idx * embedding_vec_size);
// Replace complete, mark this task completed
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
key_type read_key = keys[next_set].set_[next_slab].slab_[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
// If found target key, the insertion/replace is no longer needed.
// Refresh the slot, the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed);
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key.
            // If an empty key is found, do the insertion; the task is complete
found_lane = __ffs(warp_tile.ballot(read_key == empty_key)) - 1;
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
keys[next_set].set_[next_slab].slab_[found_lane] = key;
slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, vals + found_offset * embedding_vec_size, d_values + next_idx * embedding_vec_size);
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// If no target or unused slot found in this slab,
// Refresh LR info, continue probing
ref_counter_type read_slot_counter = slot_counter[(next_set * set_associativity + next_slab) * warp_size + lane_idx];
if(read_slot_counter < min_slot_counter_val){
min_slot_counter_val = read_slot_counter;
slab_distance = counter;
}
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]);
}
}
#else
// Kernel to insert or replace the <k,v> pairs into the cache
template<typename key_type,
typename slabset,
typename ref_counter_type,
typename set_hasher,
typename slab_hasher,
key_type empty_key,
int set_associativity,
int warp_size,
ref_counter_type max_ref_counter_type = std::numeric_limits<ref_counter_type>::max(),
size_t max_slab_distance = std::numeric_limits<size_t>::max()>
__global__ void insert_replace_kernel(const key_type* d_keys,
const float* d_values,
const size_t embedding_vec_size,
const size_t len,
volatile slabset* keys,
volatile float* vals,
volatile ref_counter_type* slot_counter,
volatile int* set_mutex,
ref_counter_type* global_counter,
const size_t capacity_in_set){
// Lane(thread) global ID
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Lane(thread) ID within a warp_tile
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block());
const size_t lane_idx = warp_tile.thread_rank();
// The assigned key for this lane(thread)
key_type key;
// The dst slabset and the dst slab inside this set
size_t src_set;
size_t src_slab;
// Active flag: whether current lane(thread) has unfinished task
bool active = false;
if( idx < len ){
active = true;
key = d_keys[idx];
src_set = set_hasher::hash(key) % capacity_in_set;
src_slab = slab_hasher::hash(key) % set_associativity;
}
    // Each lane participates in the warp_tile ballot to produce the warp-level work queue
unsigned active_mask = warp_tile.ballot(active);
// The warp-level outer loop: finish all the tasks within the work queue
while(active_mask != 0){
        // Next task in the work queue, starting from the lowest-index lane (thread)
int next_lane = __ffs(active_mask) - 1;
// Broadcast the task, the global index and the src slabset and slab to all lane in a warp_tile
key_type next_key = warp_tile.shfl(key, next_lane);
size_t next_idx = warp_tile.shfl(idx, next_lane);
size_t next_set = warp_tile.shfl(src_set, next_lane);
size_t next_slab = warp_tile.shfl(src_slab, next_lane);
size_t first_slab = next_slab;
// Counter to record how many slab have been searched
size_t counter = 0;
// Variable to keep the min slot counter during the probing
ref_counter_type min_slot_counter_val = max_ref_counter_type;
// Variable to keep the slab distance for slot with min counter
size_t slab_distance = max_slab_distance;
// Variable to keep the slot distance for slot with min counter within the slab
size_t slot_distance;
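        // min_slot_counter_val / slab_distance / slot_distance track the least-recently-used slot
        // observed while probing; they are only consulted when probing finds neither the key nor an empty slot.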
// Working queue before task started
const unsigned old_active_mask = active_mask;
// Lock the slabset before operating the slabset
warp_lock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
// The warp-level inner loop: finish a single task in the work queue
while(active_mask == old_active_mask){
// When all the slabs inside a slabset have been searched
            // and no empty slot or target slot was found, replace the least-recently-used (LRU) slot
if(counter >= set_associativity){
                // (sub-)warp all-reduction; the reduction result is stored in all threads
warp_min_reduction<ref_counter_type, warp_size>(warp_tile, min_slot_counter_val, slab_distance, slot_distance);
// Calculate the position of LR slot
size_t target_slab = (first_slab + slab_distance) % set_associativity;
size_t slot_index = (next_set * set_associativity + target_slab) * warp_size + slot_distance;
// Replace the LR slot
if(lane_idx == (size_t)next_lane){
((volatile key_type*)(keys[next_set].set_[target_slab].slab_))[slot_distance] = key;
slot_counter[slot_index] = atomicAdd(global_counter, 0);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(vals + slot_index * embedding_vec_size), (volatile float*)(d_values + next_idx * embedding_vec_size));
// Replace complete, mark this task completed
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// The warp_tile read out the slab
key_type read_key = ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[lane_idx];
// Compare the slab data with the target key
int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1;
// If found target key, the insertion/replace is no longer needed.
// Refresh the slot, the task is completed
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
slot_counter[found_offset] = atomicAdd(global_counter, 0);
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// Compare the slab data with empty key.
            // If an empty key is found, do the insertion; the task is complete
found_lane = __ffs(warp_tile.ballot(read_key == empty_key)) - 1;
if(found_lane >= 0){
size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane;
if(lane_idx == (size_t)next_lane){
((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[found_lane] = key;
slot_counter[found_offset] = atomicAdd(global_counter, 0);
}
warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(vals + found_offset * embedding_vec_size), (volatile float*)(d_values + next_idx * embedding_vec_size));
if(lane_idx == (size_t)next_lane){
active = false;
}
active_mask = warp_tile.ballot(active);
break;
}
// If no target or unused slot found in this slab,
// Refresh LR info, continue probing
ref_counter_type read_slot_counter = slot_counter[(next_set * set_associativity + next_slab) * warp_size + lane_idx];
if(read_slot_counter < min_slot_counter_val){
min_slot_counter_val = read_slot_counter;
slab_distance = counter;
}
counter++;
next_slab = (next_slab + 1) % set_associativity;
}
// Unlock the slabset after operating the slabset
warp_unlock_mutex<warp_size>(warp_tile, set_mutex[next_set]);
}
}
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::gpu_cache(const size_t capacity_in_set,
const size_t embedding_vec_size)
:capacity_in_set_(capacity_in_set),
embedding_vec_size_(embedding_vec_size){
// Check parameter
if(capacity_in_set_ == 0){
printf("Error: Invalid value for capacity_in_set.\n");
return;
}
if(embedding_vec_size_ == 0){
printf("Error: Invalid value for embedding_vec_size.\n");
return;
}
if(set_associativity <= 0){
printf("Error: Invalid value for set_associativity.\n");
return;
}
if(warp_size != 1 && warp_size != 2 && warp_size != 4 && warp_size != 8 && warp_size != 16 && warp_size != 32){
printf("Error: Invalid value for warp_size.\n");
return;
}
// Get the current CUDA dev
CUDA_CHECK(cudaGetDevice( &dev_ ));
// Calculate # of slot
num_slot_ = capacity_in_set_ * set_associativity * warp_size;
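    // Each set contains set_associativity slabs and each slab holds warp_size slots,
    // so the cache provides capacity_in_set * set_associativity * warp_size <key,value> slots in total.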
// Allocate GPU memory for cache
CUDA_CHECK(cudaMalloc((void**)&keys_, sizeof(slabset) * capacity_in_set_));
CUDA_CHECK(cudaMalloc((void**)&vals_, sizeof(float) * embedding_vec_size_ * num_slot_));
CUDA_CHECK(cudaMalloc((void**)&slot_counter_, sizeof(ref_counter_type) * num_slot_));
CUDA_CHECK(cudaMalloc((void**)&global_counter_, sizeof(atomic_ref_counter_type)));
// Allocate GPU memory for set mutex
CUDA_CHECK(cudaMalloc((void**)&set_mutex_, sizeof(mutex) * capacity_in_set_));
// Initialize the cache, set all entry to unused <K,V>
init_cache<<<((num_slot_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_>>>(keys_, slot_counter_, global_counter_, num_slot_, empty_key, set_mutex_, capacity_in_set_);
// Wait for initialization to finish
CUDA_CHECK(cudaStreamSynchronize(0));
CUDA_CHECK(cudaGetLastError());
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::gpu_cache(const size_t capacity_in_set,
const size_t embedding_vec_size)
:capacity_in_set_(capacity_in_set),
embedding_vec_size_(embedding_vec_size){
// Check parameter
if(capacity_in_set_ == 0){
printf("Error: Invalid value for capacity_in_set.\n");
return;
}
if(embedding_vec_size_ == 0){
printf("Error: Invalid value for embedding_vec_size.\n");
return;
}
if(set_associativity <= 0){
printf("Error: Invalid value for set_associativity.\n");
return;
}
if(warp_size != 1 && warp_size != 2 && warp_size != 4 && warp_size != 8 && warp_size != 16 && warp_size != 32){
printf("Error: Invalid value for warp_size.\n");
return;
}
// Get the current CUDA dev
CUDA_CHECK(cudaGetDevice( &dev_ ));
// Calculate # of slot
num_slot_ = capacity_in_set_ * set_associativity * warp_size;
// Allocate GPU memory for cache
CUDA_CHECK(cudaMalloc((void**)&keys_, sizeof(slabset) * capacity_in_set_));
CUDA_CHECK(cudaMalloc((void**)&vals_, sizeof(float) * embedding_vec_size_ * num_slot_));
CUDA_CHECK(cudaMalloc((void**)&slot_counter_, sizeof(ref_counter_type) * num_slot_));
CUDA_CHECK(cudaMalloc((void**)&global_counter_, sizeof(ref_counter_type)));
// Allocate GPU memory for set mutex
CUDA_CHECK(cudaMalloc((void**)&set_mutex_, sizeof(int) * capacity_in_set_));
// Initialize the cache, set all entry to unused <K,V>
init_cache<<<((num_slot_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_>>>(keys_, slot_counter_, global_counter_, num_slot_, empty_key, set_mutex_, capacity_in_set_);
// Wait for initialization to finish
CUDA_CHECK(cudaStreamSynchronize(0));
CUDA_CHECK(cudaGetLastError());
}
#endif
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::~gpu_cache(){
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set device
CUDA_CHECK(cudaSetDevice(dev_));
// Destruct CUDA std object
destruct_kernel<<<((capacity_in_set_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_>>>(global_counter_, set_mutex_, capacity_in_set_);
// Wait for destruction to finish
CUDA_CHECK(cudaStreamSynchronize(0));
// Free GPU memory for cache
CUDA_CHECK(cudaFree( keys_ ));
CUDA_CHECK(cudaFree( vals_ ));
CUDA_CHECK(cudaFree( slot_counter_ ));
CUDA_CHECK(cudaFree( global_counter_ ));
// Free GPU memory for set mutex
CUDA_CHECK(cudaFree( set_mutex_ ));
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::~gpu_cache() noexcept(false) {
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set device
CUDA_CHECK(cudaSetDevice(dev_));
// Free GPU memory for cache
CUDA_CHECK(cudaFree( keys_ ));
CUDA_CHECK(cudaFree( vals_ ));
CUDA_CHECK(cudaFree( slot_counter_ ));
CUDA_CHECK(cudaFree( global_counter_ ));
// Free GPU memory for set mutex
CUDA_CHECK(cudaFree( set_mutex_ ));
}
#endif
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Query(const key_type* d_keys,
const size_t len,
float* d_values,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
cudaStream_t stream){
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(cudaSetDevice(dev_));
// Check if it is a valid query
if(len == 0){
// Set the d_missing_len to 0 before return
CUDA_CHECK(cudaMemsetAsync(d_missing_len, 0, sizeof(size_t), stream));
return;
}
    // Update the global counter as the user performs a new (most recent) read operation on the cache
// Resolve distance overflow issue as well.
update_kernel_overflow_ignore<atomic_ref_counter_type><<<1, 1, 0, stream>>>(global_counter_, d_missing_len);
// Read from the cache
// Touch and refresh the hitting slot
get_kernel<key_type, ref_counter_type, atomic_ref_counter_type, slabset, set_hasher, slab_hasher, mutex, empty_key, set_associativity, warp_size>
<<<((len-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_, 0, stream>>>(d_keys, len, d_values, embedding_vec_size_, d_missing_index,
d_missing_keys, d_missing_len, global_counter_,
slot_counter_, capacity_in_set_, keys_, vals_, set_mutex_);
// Check for GPU error before return
CUDA_CHECK(cudaGetLastError());
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Query(const key_type* d_keys,
const size_t len,
float* d_values,
uint64_t* d_missing_index,
key_type* d_missing_keys,
size_t* d_missing_len,
cudaStream_t stream){
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(cudaSetDevice(dev_));
// Check if it is a valid query
if(len == 0){
// Set the d_missing_len to 0 before return
CUDA_CHECK(cudaMemsetAsync(d_missing_len, 0, sizeof(size_t), stream));
return;
}
    // Update the global counter as the user performs a new (most recent) read operation on the cache
// Resolve distance overflow issue as well.
update_kernel_overflow_ignore<ref_counter_type><<<1, 1, 0, stream>>>(global_counter_, d_missing_len);
// Read from the cache
// Touch and refresh the hitting slot
get_kernel<key_type, ref_counter_type, slabset, set_hasher, slab_hasher, empty_key, set_associativity, warp_size>
<<<((len-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_, 0, stream>>>(d_keys, len, d_values, embedding_vec_size_, d_missing_index,
d_missing_keys, d_missing_len, global_counter_,
slot_counter_, capacity_in_set_, keys_, vals_, set_mutex_);
// Check for GPU error before return
CUDA_CHECK(cudaGetLastError());
}
#endif
#ifdef LIBCUDACXX_VERSION
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Replace(const key_type* d_keys,
const size_t len,
const float* d_values,
cudaStream_t stream){
// Check if it is a valid replacement
if(len == 0){
return;
}
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(cudaSetDevice(dev_));
    // Try to insert the <k,v> pairs into the cache as long as there are unused slots
    // Otherwise, replace existing <k,v> pairs in the cache
insert_replace_kernel<key_type, slabset, ref_counter_type, mutex, atomic_ref_counter_type, set_hasher, slab_hasher, empty_key, set_associativity, warp_size>
<<<((len-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_, 0, stream>>>
(d_keys, d_values, embedding_vec_size_, len, keys_, vals_, slot_counter_, set_mutex_, global_counter_, capacity_in_set_);
// Check for GPU error before return
CUDA_CHECK(cudaGetLastError());
}
#else
template<typename key_type,
typename ref_counter_type,
key_type empty_key,
int set_associativity,
int warp_size,
typename set_hasher,
typename slab_hasher>
void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::
Replace(const key_type* d_keys,
const size_t len,
const float* d_values,
cudaStream_t stream){
// Check if it is a valid replacement
if(len == 0){
return;
}
// Device Restorer
nv::CudaDeviceRestorer dev_restorer;
// Set to the device of this cache
CUDA_CHECK(cudaSetDevice(dev_));
    // Try to insert the <k,v> pairs into the cache as long as there are unused slots
    // Otherwise, replace existing <k,v> pairs in the cache
insert_replace_kernel<key_type, slabset, ref_counter_type, set_hasher, slab_hasher, empty_key, set_associativity, warp_size>
<<<((len-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_, 0, stream>>>
(d_keys, d_values, embedding_vec_size_, len, keys_, vals_, slot_counter_, set_mutex_, global_counter_, capacity_in_set_);
// Check for GPU error before return
CUDA_CHECK(cudaGetLastError());
}
#endif
template class gpu_cache<unsigned int, uint64_t, std::numeric_limits<unsigned int>::max(), SET_ASSOCIATIVITY, SLAB_SIZE>;
template class gpu_cache<long long, uint64_t, std::numeric_limits<long long>::max(), SET_ASSOCIATIVITY, SLAB_SIZE>;
} // namespace gpu_cache
|
ca961cfe7e742cf1d443e00c751a9dbc737502de.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/cudapoa_add_alignment.cuh" //addAlignment, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "basic_graph.hpp" //BasicGraph
#include <claraparabricks/genomeworks/cudapoa/batch.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/stringutils.hpp> //array_to_string
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> // get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicAlignment
{
public:
BasicAlignment(std::vector<uint8_t> nodes, Int16Vec2D outgoing_edges,
Int16Vec2D node_alignments, std::vector<uint16_t> node_coverage_counts,
std::vector<uint8_t> read, std::vector<int8_t> base_weights, std::vector<int16_t> alignment_graph, std::vector<int16_t> alignment_read)
: graph(nodes, outgoing_edges, node_alignments, node_coverage_counts)
        , read_(read)
        , base_weights_(base_weights)
, alignment_graph_(alignment_graph)
, alignment_read_(alignment_read)
{
//do nothing for now
}
void get_alignments(int16_t* alignment_graph, int16_t* alignment_read, int16_t* alignment_length) const
{
for (int i = 0; i < get_size(alignment_graph_); i++)
{
alignment_graph[i] = alignment_graph_[i];
alignment_read[i] = alignment_read_[i];
}
*alignment_length = get_size(alignment_graph_);
}
void get_read(uint8_t* read) const
{
for (int i = 0; i < get_size(read_); i++)
{
read[i] = read_[i];
}
}
void get_base_weights(int8_t* base_weights) const
{
for (int i = 0; i < get_size(base_weights_); i++)
{
base_weights[i] = base_weights_[i];
}
}
void get_graph_buffers(int16_t* incoming_edges, uint16_t* incoming_edge_count,
int16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint8_t* nodes, int16_t* node_count,
int16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* node_coverage_counts) const
{
if (!graph.is_complete())
{
throw "graph is incomplete; unable to fill the buffers.";
}
graph.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph.get_nodes(nodes, node_count);
graph.get_node_alignments(node_alignments, node_alignment_count);
graph.get_node_coverage_counts(node_coverage_counts);
}
void get_alignment_buffers(int16_t* alignment_graph, int16_t* alignment_read, int16_t* alignment_length,
uint8_t* read, int8_t* base_weights) const
{
get_alignments(alignment_graph, alignment_read, alignment_length);
get_read(read);
get_base_weights(base_weights);
}
protected:
BasicGraph graph;
std::vector<uint8_t> read_;
std::vector<int8_t> base_weights_;
std::vector<int16_t> alignment_graph_;
std::vector<int16_t> alignment_read_;
};
typedef std::pair<BasicGraph, BasicAlignment> AddAlginmentTestPair;
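// Each test case pairs the expected resulting graph (first) with the alignment to be added (second)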
// create a vector of test cases
std::vector<AddAlginmentTestPair> getAddAlignmentTestCases()
{
std::vector<AddAlginmentTestPair> test_cases;
/*
* read: A A T A
     * graph:           A — A — A — A
* alignment graph: 0 1 2 3
* alignment read: 0 1 2 3
* T
* / \
     * final graph      A — A    A
* \ /
* A
*/
BasicGraph ans_1(Int16Vec2D({{}, {0}, {1}, {2, 4}, {1}}));
BasicAlignment ali_1({'A', 'A', 'A', 'A'}, //nodes
{{}, {0}, {1}, {2}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{1, 1, 1, 1}, //node_coverage_counts
{'A', 'A', 'T', 'A'}, //read
{0, 0, 1, 2}, //base weights
{0, 1, 2, 3}, //alignment_graph
{0, 1, 2, 3}); //alignment_read
test_cases.emplace_back(std::move(ans_1), std::move(ali_1));
/*
* read: A T C G A
     * graph:           A — T — C — G
* alignment graph: 0 1 2 3 -1
* alignment read: 0 1 2 3 4
*
     * final graph      A — T — C — G — A
*
*/
BasicGraph ans_2(Int16Vec2D({{}, {0}, {1}, {2}, {3}}));
BasicAlignment ali_2({'A', 'T', 'C', 'G'}, //nodes
{{}, {0}, {1}, {2}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{1, 1, 1, 1}, //node_coverage_counts
{'A', 'T', 'C', 'G', 'A'}, //read
{0, 1, 2, 3, 4}, //base weights
{0, 1, 2, 3, -1}, //alignment_graph
{0, 1, 2, 3, 4}); //alignment_read
test_cases.emplace_back(std::move(ans_2), std::move(ali_2));
/*
* read: A T C G
* A
* / \
     * graph:          A     C — C — G
* alignment graph: 0 4 2 3
* alignment read: 0 1 2 3
* T
* / \
     * final graph     A     C — C — G
* \ /
* A
*/
BasicGraph ans_3(Int16Vec2D({{}, {0}, {1, 4, 5}, {2}, {0}, {0}}));
BasicAlignment ali_3({'A', 'A', 'C', 'G', 'C'}, //nodes
{{}, {0}, {1, 4}, {2}, {0}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{2, 1, 2, 2, 1}, //node_coverage_counts
{'A', 'T', 'C', 'G'}, //read
{0, 1, 1, 5}, //base weights
{0, 4, 2, 3}, //alignment_graph
{0, 1, 2, 3}); //alignment_read
test_cases.emplace_back(std::move(ans_3), std::move(ali_3));
/*
* read: A A
     * graph:          A — T — T — G — A
* alignment graph: 0 1 2 3 4
* alignment read: 0 -1 -1 -1 1
*
     * final graph     A — T — T — G — A
* \_____________/
*
*/
BasicGraph ans_4(Int16Vec2D({{}, {0}, {1}, {2}, {3, 0}}));
BasicAlignment ali_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{{}, {0}, {1}, {2}, {3}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{1, 1, 1, 1, 1}, //node_coverage_counts
{'A', 'A'}, //read
{5, 1}, //base weights
{0, 1, 2, 3, 4}, //alignment_graph
{0, -1, -1, -1, 1}); //alignment_read
test_cases.emplace_back(std::move(ans_4), std::move(ali_4));
/*
* read: A C T T A
     *                        T — G
* / \
     * graph:          A — C — A — T — A
* alignment graph: 0 5 6 3 4
* alignment read: 0 1 2 3 4
     *                        T — G
* / \
     * final graph     A — C — A — T — A
* \ /
* T
*
*/
BasicGraph ans_5(Int16Vec2D({{}, {0}, {1}, {2, 6, 7}, {3}, {0}, {5}, {5}}));
BasicAlignment ali_5({'A', 'T', 'G', 'T', 'A', 'C', 'A'}, //nodes
{{}, {0}, {1}, {2, 6}, {3}, {0}, {5}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{2, 1, 1, 2, 2, 1, 1}, //node_coverage_counts
{'A', 'C', 'T', 'T', 'A'}, //read
{10, 9, 8, 7, 6}, //base weights
{0, 5, 6, 3, 4}, //alignment_graph
{0, 1, 2, 3, 4}); //alignment_read
test_cases.emplace_back(std::move(ans_5), std::move(ali_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel to test the addAlignment device function.
BasicGraph testAddAlignment(const BasicAlignment& obj)
{
//declare device buffer
uint8_t* nodes = nullptr;
int16_t* node_count = nullptr;
int16_t* node_alignments = nullptr;
uint16_t* node_alignment_count = nullptr;
int16_t* incoming_edges = nullptr;
uint16_t* incoming_edge_count = nullptr;
int16_t* outgoing_edges = nullptr;
uint16_t* outgoing_edge_count = nullptr;
uint16_t* incoming_edge_w = nullptr;
uint16_t* outgoing_edge_w = nullptr;
int16_t* alignment_length = nullptr;
int16_t* graph = nullptr;
int16_t* alignment_graph = nullptr;
uint8_t* read = nullptr;
int8_t* base_weights = nullptr;
int16_t* alignment_read = nullptr;
uint16_t* node_coverage_counts = nullptr;
int16_t* sequence_begin_nodes_ids = nullptr;
uint16_t* outgoing_edges_coverage = nullptr;
uint16_t* outgoing_edges_coverage_count = nullptr;
uint16_t s = 0;
BatchConfig batch_size; // default max_sequence_size = 1024, max_sequences_per_poa = 100
//allocate unified memory so they can be accessed by both host and device.
GW_CU_CHECK_ERR(hipMallocManaged(&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_count, sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_alignments, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_alignment_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&incoming_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&outgoing_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&alignment_length, sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&graph, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&alignment_graph, batch_size.max_sequence_size * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&read, batch_size.max_sequence_size * sizeof(uint8_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&base_weights, batch_size.max_sequence_size * sizeof(int8_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&alignment_read, batch_size.max_sequence_size * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&node_coverage_counts, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&sequence_begin_nodes_ids, batch_size.max_sequences_per_poa * sizeof(int16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&outgoing_edges_coverage, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * batch_size.max_sequences_per_poa * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged(&outgoing_edges_coverage_count, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
//initialize all 'count' buffers
memset(node_alignment_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(node_coverage_counts, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
//calculate edge counts on host
//3 buffers are disregarded because they don't affect correctness -- incoming_edge_w, outgoing_edge_w, graph
obj.get_graph_buffers(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count,
nodes, node_count,
node_alignments, node_alignment_count,
node_coverage_counts);
obj.get_alignment_buffers(alignment_graph, alignment_read, alignment_length, read, base_weights);
    // call the host wrapper of the addAlignment kernel
addAlignment(nodes,
node_count,
node_alignments, node_alignment_count,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w,
alignment_length,
graph,
alignment_graph,
read,
alignment_read,
node_coverage_counts,
base_weights,
sequence_begin_nodes_ids,
outgoing_edges_coverage,
outgoing_edges_coverage_count,
s,
batch_size.max_sequences_per_poa,
batch_size.max_nodes_per_graph);
GW_CU_CHECK_ERR(hipDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
//create and return a new BasicGraph object that encodes the resulting graph structure after adding the alignment
BasicGraph res(outgoing_edges, outgoing_edge_count, *node_count);
GW_CU_CHECK_ERR(hipFree(nodes));
GW_CU_CHECK_ERR(hipFree(node_count));
GW_CU_CHECK_ERR(hipFree(node_alignments));
GW_CU_CHECK_ERR(hipFree(node_alignment_count));
GW_CU_CHECK_ERR(hipFree(incoming_edges));
GW_CU_CHECK_ERR(hipFree(incoming_edge_count));
GW_CU_CHECK_ERR(hipFree(outgoing_edges));
GW_CU_CHECK_ERR(hipFree(outgoing_edge_count));
GW_CU_CHECK_ERR(hipFree(incoming_edge_w));
GW_CU_CHECK_ERR(hipFree(outgoing_edge_w));
GW_CU_CHECK_ERR(hipFree(alignment_length));
GW_CU_CHECK_ERR(hipFree(graph));
GW_CU_CHECK_ERR(hipFree(alignment_graph));
GW_CU_CHECK_ERR(hipFree(read));
GW_CU_CHECK_ERR(hipFree(base_weights));
GW_CU_CHECK_ERR(hipFree(alignment_read));
GW_CU_CHECK_ERR(hipFree(node_coverage_counts));
GW_CU_CHECK_ERR(hipFree(sequence_begin_nodes_ids));
GW_CU_CHECK_ERR(hipFree(outgoing_edges_coverage));
GW_CU_CHECK_ERR(hipFree(outgoing_edges_coverage_count));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class AddAlignmentTest : public TestWithParam<AddAlginmentTestPair>
{
public:
void SetUp() {}
BasicGraph runAddAlignment(const BasicAlignment& ali)
{
return testAddAlignment(ali);
}
};
TEST_P(AddAlignmentTest, TestAddAlignmentCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runAddAlignment(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestAddAlginment, AddAlignmentTest, ValuesIn(getAddAlignmentTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
| ca961cfe7e742cf1d443e00c751a9dbc737502de.cu | /*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/cudapoa_add_alignment.cuh" //addAlignment, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "basic_graph.hpp" //BasicGraph
#include <claraparabricks/genomeworks/cudapoa/batch.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/stringutils.hpp> //array_to_string
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> // get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicAlignment
{
public:
BasicAlignment(std::vector<uint8_t> nodes, Int16Vec2D outgoing_edges,
Int16Vec2D node_alignments, std::vector<uint16_t> node_coverage_counts,
std::vector<uint8_t> read, std::vector<int8_t> base_weights, std::vector<int16_t> alignment_graph, std::vector<int16_t> alignment_read)
: graph(nodes, outgoing_edges, node_alignments, node_coverage_counts)
        , read_(read)
        , base_weights_(base_weights)
, alignment_graph_(alignment_graph)
, alignment_read_(alignment_read)
{
//do nothing for now
}
void get_alignments(int16_t* alignment_graph, int16_t* alignment_read, int16_t* alignment_length) const
{
for (int i = 0; i < get_size(alignment_graph_); i++)
{
alignment_graph[i] = alignment_graph_[i];
alignment_read[i] = alignment_read_[i];
}
*alignment_length = get_size(alignment_graph_);
}
void get_read(uint8_t* read) const
{
for (int i = 0; i < get_size(read_); i++)
{
read[i] = read_[i];
}
}
void get_base_weights(int8_t* base_weights) const
{
for (int i = 0; i < get_size(base_weights_); i++)
{
base_weights[i] = base_weights_[i];
}
}
void get_graph_buffers(int16_t* incoming_edges, uint16_t* incoming_edge_count,
int16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint8_t* nodes, int16_t* node_count,
int16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* node_coverage_counts) const
{
if (!graph.is_complete())
{
throw "graph is incomplete; unable to fill the buffers.";
}
graph.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph.get_nodes(nodes, node_count);
graph.get_node_alignments(node_alignments, node_alignment_count);
graph.get_node_coverage_counts(node_coverage_counts);
}
void get_alignment_buffers(int16_t* alignment_graph, int16_t* alignment_read, int16_t* alignment_length,
uint8_t* read, int8_t* base_weights) const
{
get_alignments(alignment_graph, alignment_read, alignment_length);
get_read(read);
get_base_weights(base_weights);
}
protected:
BasicGraph graph;
std::vector<uint8_t> read_;
std::vector<int8_t> base_weights_;
std::vector<int16_t> alignment_graph_;
std::vector<int16_t> alignment_read_;
};
typedef std::pair<BasicGraph, BasicAlignment> AddAlginmentTestPair;
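// Each test case pairs the expected resulting graph (first) with the alignment to be added (second)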
// create a vector of test cases
std::vector<AddAlginmentTestPair> getAddAlignmentTestCases()
{
std::vector<AddAlginmentTestPair> test_cases;
/*
* read: A A T A
* graph: A — A — A — A
* alignment graph: 0 1 2 3
* alignment read: 0 1 2 3
* T
* / \
* final graph A — A A
* \ /
* A
*/
BasicGraph ans_1(Int16Vec2D({{}, {0}, {1}, {2, 4}, {1}}));
BasicAlignment ali_1({'A', 'A', 'A', 'A'}, //nodes
{{}, {0}, {1}, {2}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{1, 1, 1, 1}, //node_coverage_counts
{'A', 'A', 'T', 'A'}, //read
{0, 0, 1, 2}, //base weights
{0, 1, 2, 3}, //alignment_graph
{0, 1, 2, 3}); //alignment_read
test_cases.emplace_back(std::move(ans_1), std::move(ali_1));
/*
* read: A T C G A
* graph: A — T — C — G
* alignment graph: 0 1 2 3 -1
* alignment read: 0 1 2 3 4
*
* final graph A — T — C — G — A
*
*/
BasicGraph ans_2(Int16Vec2D({{}, {0}, {1}, {2}, {3}}));
BasicAlignment ali_2({'A', 'T', 'C', 'G'}, //nodes
{{}, {0}, {1}, {2}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{1, 1, 1, 1}, //node_coverage_counts
{'A', 'T', 'C', 'G', 'A'}, //read
{0, 1, 2, 3, 4}, //base weights
{0, 1, 2, 3, -1}, //alignment_graph
{0, 1, 2, 3, 4}); //alignment_read
test_cases.emplace_back(std::move(ans_2), std::move(ali_2));
/*
* read: A T C G
* A
* / \
* graph: A — C — C — G
* alignment graph: 0 4 2 3
* alignment read: 0 1 2 3
* T
* / \
* final graph A — C — C — G
* \ /
* A
*/
BasicGraph ans_3(Int16Vec2D({{}, {0}, {1, 4, 5}, {2}, {0}, {0}}));
BasicAlignment ali_3({'A', 'A', 'C', 'G', 'C'}, //nodes
{{}, {0}, {1, 4}, {2}, {0}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{2, 1, 2, 2, 1}, //node_coverage_counts
{'A', 'T', 'C', 'G'}, //read
{0, 1, 1, 5}, //base weights
{0, 4, 2, 3}, //alignment_graph
{0, 1, 2, 3}); //alignment_read
test_cases.emplace_back(std::move(ans_3), std::move(ali_3));
/*
* read: A A
* graph: A — T — T — G — A
* alignment graph: 0 1 2 3 4
* alignment read: 0 -1 -1 -1 1
*
* final graph A — T — T — G — A
* \_____________/
*
*/
BasicGraph ans_4(Int16Vec2D({{}, {0}, {1}, {2}, {3, 0}}));
BasicAlignment ali_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{{}, {0}, {1}, {2}, {3}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{1, 1, 1, 1, 1}, //node_coverage_counts
{'A', 'A'}, //read
{5, 1}, //base weights
{0, 1, 2, 3, 4}, //alignment_graph
{0, -1, -1, -1, 1}); //alignment_read
test_cases.emplace_back(std::move(ans_4), std::move(ali_4));
/*
* read: A C T T A
* T — G
* / \
* graph: A — C — A — T — A
* alignment graph: 0 5 6 3 4
* alignment read: 0 1 2 3 4
* T — G
* / \
* final graph A — C — A — T — A
* \ /
* T
*
*/
BasicGraph ans_5(Int16Vec2D({{}, {0}, {1}, {2, 6, 7}, {3}, {0}, {5}, {5}}));
BasicAlignment ali_5({'A', 'T', 'G', 'T', 'A', 'C', 'A'}, //nodes
{{}, {0}, {1}, {2, 6}, {3}, {0}, {5}}, //outgoing_edges
{{}, {}, {}, {}}, //node_alignments
{2, 1, 1, 2, 2, 1, 1}, //node_coverage_counts
{'A', 'C', 'T', 'T', 'A'}, //read
{10, 9, 8, 7, 6}, //base weights
{0, 5, 6, 3, 4}, //alignment_graph
{0, 1, 2, 3, 4}); //alignment_read
test_cases.emplace_back(std::move(ans_5), std::move(ali_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel to test the addAlignment device function.
BasicGraph testAddAlignment(const BasicAlignment& obj)
{
//declare device buffer
uint8_t* nodes = nullptr;
int16_t* node_count = nullptr;
int16_t* node_alignments = nullptr;
uint16_t* node_alignment_count = nullptr;
int16_t* incoming_edges = nullptr;
uint16_t* incoming_edge_count = nullptr;
int16_t* outgoing_edges = nullptr;
uint16_t* outgoing_edge_count = nullptr;
uint16_t* incoming_edge_w = nullptr;
uint16_t* outgoing_edge_w = nullptr;
int16_t* alignment_length = nullptr;
int16_t* graph = nullptr;
int16_t* alignment_graph = nullptr;
uint8_t* read = nullptr;
int8_t* base_weights = nullptr;
int16_t* alignment_read = nullptr;
uint16_t* node_coverage_counts = nullptr;
int16_t* sequence_begin_nodes_ids = nullptr;
uint16_t* outgoing_edges_coverage = nullptr;
uint16_t* outgoing_edges_coverage_count = nullptr;
uint16_t s = 0;
BatchConfig batch_size; // default max_sequence_size = 1024, max_sequences_per_poa = 100
//allocate unified memory so they can be accessed by both host and device.
GW_CU_CHECK_ERR(cudaMallocManaged(&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_count, sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_alignments, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_alignment_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&alignment_length, sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&graph, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&alignment_graph, batch_size.max_sequence_size * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&read, batch_size.max_sequence_size * sizeof(uint8_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&base_weights, batch_size.max_sequence_size * sizeof(int8_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&alignment_read, batch_size.max_sequence_size * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&node_coverage_counts, batch_size.max_nodes_per_graph * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&sequence_begin_nodes_ids, batch_size.max_sequences_per_poa * sizeof(int16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edges_coverage, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * batch_size.max_sequences_per_poa * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edges_coverage_count, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
//initialize all 'count' buffers
memset(node_alignment_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset(node_coverage_counts, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
//calculate edge counts on host
//3 buffers are disregarded because they don't affect correctness -- incoming_edge_w, outgoing_edge_w, graph
obj.get_graph_buffers(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count,
nodes, node_count,
node_alignments, node_alignment_count,
node_coverage_counts);
obj.get_alignment_buffers(alignment_graph, alignment_read, alignment_length, read, base_weights);
    // call the host wrapper of the addAlignment kernel
addAlignment(nodes,
node_count,
node_alignments, node_alignment_count,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w,
alignment_length,
graph,
alignment_graph,
read,
alignment_read,
node_coverage_counts,
base_weights,
sequence_begin_nodes_ids,
outgoing_edges_coverage,
outgoing_edges_coverage_count,
s,
batch_size.max_sequences_per_poa,
batch_size.max_nodes_per_graph);
GW_CU_CHECK_ERR(cudaDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
//create and return a new BasicGraph object that encodes the resulting graph structure after adding the alignment
BasicGraph res(outgoing_edges, outgoing_edge_count, *node_count);
GW_CU_CHECK_ERR(cudaFree(nodes));
GW_CU_CHECK_ERR(cudaFree(node_count));
GW_CU_CHECK_ERR(cudaFree(node_alignments));
GW_CU_CHECK_ERR(cudaFree(node_alignment_count));
GW_CU_CHECK_ERR(cudaFree(incoming_edges));
GW_CU_CHECK_ERR(cudaFree(incoming_edge_count));
GW_CU_CHECK_ERR(cudaFree(outgoing_edges));
GW_CU_CHECK_ERR(cudaFree(outgoing_edge_count));
GW_CU_CHECK_ERR(cudaFree(incoming_edge_w));
GW_CU_CHECK_ERR(cudaFree(outgoing_edge_w));
GW_CU_CHECK_ERR(cudaFree(alignment_length));
GW_CU_CHECK_ERR(cudaFree(graph));
GW_CU_CHECK_ERR(cudaFree(alignment_graph));
GW_CU_CHECK_ERR(cudaFree(read));
GW_CU_CHECK_ERR(cudaFree(base_weights));
GW_CU_CHECK_ERR(cudaFree(alignment_read));
GW_CU_CHECK_ERR(cudaFree(node_coverage_counts));
GW_CU_CHECK_ERR(cudaFree(sequence_begin_nodes_ids));
GW_CU_CHECK_ERR(cudaFree(outgoing_edges_coverage));
GW_CU_CHECK_ERR(cudaFree(outgoing_edges_coverage_count));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class AddAlignmentTest : public TestWithParam<AddAlginmentTestPair>
{
public:
void SetUp() {}
BasicGraph runAddAlignment(const BasicAlignment& ali)
{
return testAddAlignment(ali);
}
};
TEST_P(AddAlignmentTest, TestAddAlignmentCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runAddAlignment(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestAddAlginment, AddAlignmentTest, ValuesIn(getAddAlignmentTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
|
200443a57c6c84bf753ce736e2a18bfe1842b89f.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
Program: Robarts Visualization Toolkit
Copyright (c) John SH Baxter, Robarts Research Institute
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "CUDA_commonKernels.h"
#include "CUDA_hierarchicalmaxflowdecomp.h"
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "vtkCudaCommon.h"
__global__ void kern_GHMFD_GradientBuffer(float* buffer, float* gradBuffer, int3 dims, int size)
{
	int idx = CUDASTDOFFSET;
	// return early so out-of-range threads never read past the end of the buffer
	if (idx >= size) return;
	int3 idxN = { idx % dims.x, (idx / dims.x) % dims.y, idx / (dims.x*dims.y) };
	float gradMagSquared = 0.0f;
	float cur = buffer[idx];
float XHi = (idxN.x == dims.x-1) ? cur: buffer[idx+1];
float XLo = (idxN.x == 0) ? cur: buffer[idx-1];
gradMagSquared += (XHi-XLo)*(XHi-XLo);
float YHi = (idxN.y == dims.y-1) ? cur: buffer[idx+dims.x];
float YLo = (idxN.y == 0) ? cur: buffer[idx-dims.x];
gradMagSquared += (YHi-YLo)*(YHi-YLo);
float ZHi = (idxN.z == dims.z-1) ? cur: buffer[idx+dims.x*dims.y];
float ZLo = (idxN.z == 0) ? cur: buffer[idx-dims.x*dims.y];
gradMagSquared += (ZHi-ZLo)*(ZHi-ZLo);
if(idx < size)
{
gradBuffer[idx] = 0.5f * sqrt( gradMagSquared );
}
}
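// Computes the data-term energy for one label on the GPU: the sum over all
// voxels of data[i] * label[i], reduced on the device and returned as a double.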
double CUDA_GHMFD_DataTermForLabel(float* data, float* label, int size, hipStream_t* stream)
{
//allocate GPU buffers
float* devDataTermBuffer = 0;
float* devLabelBuffer = 0;
hipMalloc( &devDataTermBuffer, sizeof(float)*size );
hipMalloc( &devLabelBuffer, sizeof(float)*size );
hipMemcpyAsync( devDataTermBuffer, data, sizeof(float)*size, hipMemcpyHostToDevice, *stream );
hipMemcpyAsync( devLabelBuffer, label, sizeof(float)*size, hipMemcpyHostToDevice, *stream );
float retVal = 0.0f;
//multiply buffers
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
hipLaunchKernelGGL(( MultiplyBuffers), dim3(grid),dim3(threads),0,*stream, devDataTermBuffer, devLabelBuffer, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
hipLaunchKernelGGL(( SumOverLargeBuffer), dim3(tempGrid), dim3(threads), 0, *stream, devDataTermBuffer, i, size );
#ifdef DEBUG_VTKCUDAHMFD
hipDeviceSynchronize();
std::cout << "Reduce: " << hipGetErrorString( hipGetLastError() ) << std::endl;
#endif
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devDataTermBuffer, stream );
//return result to CPU
hipMemcpyAsync( &retVal, devDataTermBuffer, sizeof(float), hipMemcpyDeviceToHost, *stream );
hipStreamSynchronize(*stream);
//deallocate GPU buffers
hipFree( devDataTermBuffer );
hipFree( devLabelBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_LeafSmoothnessForLabel(float* smoothness, float* label, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, hipStream_t* stream)
{
//allocate GPU buffers
float* devSmoothnessBuffer = 0;
float* devLabelBuffer = 0;
hipMalloc( &devSmoothnessBuffer, sizeof(float)*size );
hipMalloc( &devLabelBuffer, sizeof(float)*size );
hipMemcpyAsync( devSmoothnessBuffer, smoothness, sizeof(float)*size, hipMemcpyHostToDevice, *stream );
hipMemcpyAsync( devLabelBuffer, label, sizeof(float)*size, hipMemcpyHostToDevice, *stream );
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
hipLaunchKernelGGL(( kern_GHMFD_GradientBuffer), dim3(grid),dim3(threads),0,*stream, devLabelBuffer, devGradBuffer, dims, size);
//multiply buffers
hipLaunchKernelGGL(( MultiplyBuffers), dim3(grid),dim3(threads),0,*stream, devSmoothnessBuffer, devGradBuffer, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
hipLaunchKernelGGL(( SumOverLargeBuffer), dim3(tempGrid), dim3(threads), 0, *stream, devSmoothnessBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devSmoothnessBuffer, stream );
//return result to CPU
float retVal = 0.0f;
hipMemcpyAsync( &retVal, devSmoothnessBuffer, sizeof(float), hipMemcpyDeviceToHost, *stream );
hipStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
hipLaunchKernelGGL(( SumBuffers), dim3(grid),dim3(threads),0,*stream, GPUParentLabel, devLabelBuffer, size);
}
//deallocate GPU buffers
hipFree( devSmoothnessBuffer );
hipFree( devLabelBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_LeafNoSmoothnessForLabel(float* label, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, hipStream_t* stream)
{
//allocate GPU buffers
float* devLabelBuffer = 0;
hipMalloc( &devLabelBuffer, sizeof(float)*size );
hipMemcpyAsync( devLabelBuffer, label, sizeof(float)*size, hipMemcpyHostToDevice, *stream );
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
hipLaunchKernelGGL(( kern_GHMFD_GradientBuffer), dim3(grid),dim3(threads),0,*stream, devLabelBuffer, devGradBuffer, dims, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
hipLaunchKernelGGL(( SumOverLargeBuffer), dim3(tempGrid), dim3(threads), 0, *stream, devGradBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devGradBuffer, stream );
//return result to CPU
float retVal = 0.0f;
hipMemcpyAsync( &retVal, devGradBuffer, sizeof(float), hipMemcpyDeviceToHost, *stream );
hipStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
hipLaunchKernelGGL(( SumBuffers), dim3(grid),dim3(threads),0,*stream, GPUParentLabel, devLabelBuffer, size);
}
//deallocate GPU buffers
hipFree( devLabelBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_BranchSmoothnessForLabel(float* smoothness, float* devLabelBuffer, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, hipStream_t* stream)
{
//allocate GPU buffers
float* devSmoothnessBuffer = 0;
hipMalloc( &devSmoothnessBuffer, sizeof(float)*size );
hipMemcpyAsync( devSmoothnessBuffer, smoothness, sizeof(float)*size, hipMemcpyHostToDevice, *stream );
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
hipLaunchKernelGGL(( kern_GHMFD_GradientBuffer), dim3(grid),dim3(threads),0,*stream, devLabelBuffer, devGradBuffer, dims, size);
//multiply buffers
hipLaunchKernelGGL(( MultiplyBuffers), dim3(grid),dim3(threads),0,*stream, devSmoothnessBuffer, devGradBuffer, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
hipLaunchKernelGGL(( SumOverLargeBuffer), dim3(tempGrid), dim3(threads), 0, *stream, devSmoothnessBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devSmoothnessBuffer, stream );
//return result to CPU
float retVal = 0.0f;
hipMemcpyAsync( &retVal, devSmoothnessBuffer, sizeof(float), hipMemcpyDeviceToHost, *stream );
hipStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
hipLaunchKernelGGL(( SumBuffers), dim3(grid),dim3(threads),0,*stream, GPUParentLabel, devLabelBuffer, size);
}
//deallocate GPU buffers
hipFree( devSmoothnessBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_BranchNoSmoothnessForLabel(float* devLabelBuffer, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, hipStream_t* stream)
{
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
hipLaunchKernelGGL(( kern_GHMFD_GradientBuffer), dim3(grid),dim3(threads),0,*stream, devLabelBuffer, devGradBuffer, dims, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
hipLaunchKernelGGL(( SumOverLargeBuffer), dim3(tempGrid), dim3(threads), 0, *stream, devGradBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devGradBuffer, stream );
//return result to CPU
float retVal = 0.0f;
hipMemcpyAsync( &retVal, devGradBuffer, sizeof(float), hipMemcpyDeviceToHost, *stream );
hipStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
hipLaunchKernelGGL(( SumBuffers), dim3(grid),dim3(threads),0,*stream, GPUParentLabel, devLabelBuffer, size);
}
//return to main class
return (double) retVal;
}
float* CUDA_GHMFD_GetBuffer(int size, hipStream_t* stream)
{
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
float* buffer = 0;
hipMalloc(&buffer,size*sizeof(float));
hipLaunchKernelGGL(( ZeroOutBuffer), dim3(grid),dim3(threads),0,*stream, buffer, size);
return buffer;
}
void CUDA_GHMFD_ReturnBuffer(float* buffer)
{
hipFree(buffer);
} | 200443a57c6c84bf753ce736e2a18bfe1842b89f.cu | /*=========================================================================
Program: Robarts Visualization Toolkit
Copyright (c) John SH Baxter, Robarts Research Institute
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "CUDA_commonKernels.h"
#include "CUDA_hierarchicalmaxflowdecomp.h"
#include "cuda.h"
#include "stdio.h"
#include "vtkCudaCommon.h"
__global__ void kern_GHMFD_GradientBuffer(float* buffer, float* gradBuffer, int3 dims, int size)
{
	int idx = CUDASTDOFFSET;
	// return early so out-of-range threads never read past the end of the buffer
	if (idx >= size) return;
	int3 idxN = { idx % dims.x, (idx / dims.x) % dims.y, idx / (dims.x*dims.y) };
	float gradMagSquared = 0.0f;
	float cur = buffer[idx];
float XHi = (idxN.x == dims.x-1) ? cur: buffer[idx+1];
float XLo = (idxN.x == 0) ? cur: buffer[idx-1];
gradMagSquared += (XHi-XLo)*(XHi-XLo);
float YHi = (idxN.y == dims.y-1) ? cur: buffer[idx+dims.x];
float YLo = (idxN.y == 0) ? cur: buffer[idx-dims.x];
gradMagSquared += (YHi-YLo)*(YHi-YLo);
float ZHi = (idxN.z == dims.z-1) ? cur: buffer[idx+dims.x*dims.y];
float ZLo = (idxN.z == 0) ? cur: buffer[idx-dims.x*dims.y];
gradMagSquared += (ZHi-ZLo)*(ZHi-ZLo);
if(idx < size)
{
gradBuffer[idx] = 0.5f * sqrt( gradMagSquared );
}
}
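// Computes the data-term energy for one label on the GPU: the sum over all
// voxels of data[i] * label[i], reduced on the device and returned as a double.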
double CUDA_GHMFD_DataTermForLabel(float* data, float* label, int size, cudaStream_t* stream)
{
//allocate GPU buffers
float* devDataTermBuffer = 0;
float* devLabelBuffer = 0;
cudaMalloc( &devDataTermBuffer, sizeof(float)*size );
cudaMalloc( &devLabelBuffer, sizeof(float)*size );
cudaMemcpyAsync( devDataTermBuffer, data, sizeof(float)*size, cudaMemcpyHostToDevice, *stream );
cudaMemcpyAsync( devLabelBuffer, label, sizeof(float)*size, cudaMemcpyHostToDevice, *stream );
float retVal = 0.0f;
//multiply buffers
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
MultiplyBuffers<<<grid,threads,0,*stream>>>(devDataTermBuffer, devLabelBuffer, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
SumOverLargeBuffer<<<tempGrid, threads, 0, *stream>>>(devDataTermBuffer, i, size );
#ifdef DEBUG_VTKCUDAHMFD
    cudaDeviceSynchronize();
std::cout << "Reduce: " << cudaGetErrorString( cudaGetLastError() ) << std::endl;
#endif
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devDataTermBuffer, stream );
//return result to CPU
cudaMemcpyAsync( &retVal, devDataTermBuffer, sizeof(float), cudaMemcpyDeviceToHost, *stream );
cudaStreamSynchronize(*stream);
//deallocate GPU buffers
cudaFree( devDataTermBuffer );
cudaFree( devLabelBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_LeafSmoothnessForLabel(float* smoothness, float* label, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, cudaStream_t* stream)
{
//allocate GPU buffers
float* devSmoothnessBuffer = 0;
float* devLabelBuffer = 0;
cudaMalloc( &devSmoothnessBuffer, sizeof(float)*size );
cudaMalloc( &devLabelBuffer, sizeof(float)*size );
cudaMemcpyAsync( devSmoothnessBuffer, smoothness, sizeof(float)*size, cudaMemcpyHostToDevice, *stream );
cudaMemcpyAsync( devLabelBuffer, label, sizeof(float)*size, cudaMemcpyHostToDevice, *stream );
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
kern_GHMFD_GradientBuffer<<<grid,threads,0,*stream>>>(devLabelBuffer, devGradBuffer, dims, size);
//multiply buffers
MultiplyBuffers<<<grid,threads,0,*stream>>>(devSmoothnessBuffer, devGradBuffer, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
SumOverLargeBuffer<<<tempGrid, threads, 0, *stream>>>(devSmoothnessBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devSmoothnessBuffer, stream );
//return result to CPU
float retVal = 0.0f;
cudaMemcpyAsync( &retVal, devSmoothnessBuffer, sizeof(float), cudaMemcpyDeviceToHost, *stream );
cudaStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
SumBuffers<<<grid,threads,0,*stream>>>(GPUParentLabel, devLabelBuffer, size);
}
//deallocate GPU buffers
cudaFree( devSmoothnessBuffer );
cudaFree( devLabelBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_LeafNoSmoothnessForLabel(float* label, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, cudaStream_t* stream)
{
//allocate GPU buffers
float* devLabelBuffer = 0;
cudaMalloc( &devLabelBuffer, sizeof(float)*size );
cudaMemcpyAsync( devLabelBuffer, label, sizeof(float)*size, cudaMemcpyHostToDevice, *stream );
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
kern_GHMFD_GradientBuffer<<<grid,threads,0,*stream>>>(devLabelBuffer, devGradBuffer, dims, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
SumOverLargeBuffer<<<tempGrid, threads, 0, *stream>>>(devGradBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devGradBuffer, stream );
//return result to CPU
float retVal = 0.0f;
cudaMemcpyAsync( &retVal, devGradBuffer, sizeof(float), cudaMemcpyDeviceToHost, *stream );
cudaStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
SumBuffers<<<grid,threads,0,*stream>>>(GPUParentLabel, devLabelBuffer, size);
}
//deallocate GPU buffers
cudaFree( devLabelBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_BranchSmoothnessForLabel(float* smoothness, float* devLabelBuffer, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, cudaStream_t* stream)
{
//allocate GPU buffers
float* devSmoothnessBuffer = 0;
cudaMalloc( &devSmoothnessBuffer, sizeof(float)*size );
cudaMemcpyAsync( devSmoothnessBuffer, smoothness, sizeof(float)*size, cudaMemcpyHostToDevice, *stream );
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
kern_GHMFD_GradientBuffer<<<grid,threads,0,*stream>>>(devLabelBuffer, devGradBuffer, dims, size);
//multiply buffers
MultiplyBuffers<<<grid,threads,0,*stream>>>(devSmoothnessBuffer, devGradBuffer, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
SumOverLargeBuffer<<<tempGrid, threads, 0, *stream>>>(devSmoothnessBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devSmoothnessBuffer, stream );
//return result to CPU
float retVal = 0.0f;
cudaMemcpyAsync( &retVal, devSmoothnessBuffer, sizeof(float), cudaMemcpyDeviceToHost, *stream );
cudaStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
SumBuffers<<<grid,threads,0,*stream>>>(GPUParentLabel, devLabelBuffer, size);
}
//deallocate GPU buffers
cudaFree( devSmoothnessBuffer );
//return to main class
return (double) retVal;
}
double CUDA_GHMFD_BranchNoSmoothnessForLabel(float* devLabelBuffer, int x, int y, int z, int size, float* GPUParentLabel, float* devGradBuffer, cudaStream_t* stream)
{
//find gradient
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
int3 dims = {x,y,z};
kern_GHMFD_GradientBuffer<<<grid,threads,0,*stream>>>(devLabelBuffer, devGradBuffer, dims, size);
//reduce buffer by summation
int i = 1;
while(i < size/2)
{
i+=i;
}
for(; i >= NUMTHREADS; i = i/2)
{
dim3 tempGrid( i>NUMTHREADS ? i/NUMTHREADS : 1, 1, 1);
SumOverLargeBuffer<<<tempGrid, threads, 0, *stream>>>(devGradBuffer, i, size );
}
SumData( min(NUMTHREADS,size), min(NUMTHREADS,size), 1, devGradBuffer, stream );
//return result to CPU
float retVal = 0.0f;
cudaMemcpyAsync( &retVal, devGradBuffer, sizeof(float), cudaMemcpyDeviceToHost, *stream );
cudaStreamSynchronize(*stream);
//add to parent buffer if present
if( GPUParentLabel )
{
SumBuffers<<<grid,threads,0,*stream>>>(GPUParentLabel, devLabelBuffer, size);
}
//return to main class
return (double) retVal;
}
float* CUDA_GHMFD_GetBuffer(int size, cudaStream_t* stream)
{
dim3 threads(NUMTHREADS,1,1);
dim3 grid = GetGrid(size);
float* buffer = 0;
cudaMalloc(&buffer,size*sizeof(float));
ZeroOutBuffer<<<grid,threads,0,*stream>>>(buffer, size);
return buffer;
}
void CUDA_GHMFD_ReturnBuffer(float* buffer)
{
cudaFree(buffer);
} |
26abc402388e7e32fd4b0c0e6a8e5e8297406ec9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
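//As a quick sanity check of the formula: a pixel with (R, G, B) = (50, 100, 200)
//maps to I = .299f*50 + .587f*100 + .114f*200 = 14.95 + 58.7 + 22.8, about 96.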
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (y < numCols && x < numRows)
{
int index = numRows*y + x;
uchar4 rgba = rgbaImage[index];
    greyImage[index] = (unsigned char)(0.299f*rgba.x + 0.587f*rgba.y + 0.114f*rgba.z);
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
int grid_x_dim = numRows/blockWidth+1;
int grid_y_dim = numCols/blockWidth+1;
const dim3 gridSize(grid_x_dim, grid_y_dim, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| 26abc402388e7e32fd4b0c0e6a8e5e8297406ec9.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
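//As a quick sanity check of the formula: a pixel with (R, G, B) = (50, 100, 200)
//maps to I = .299f*50 + .587f*100 + .114f*200 = 14.95 + 58.7 + 22.8, about 96.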
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (y < numCols && x < numRows)
{
int index = numRows*y + x;
uchar4 rgba = rgbaImage[index];
    greyImage[index] = (unsigned char)(0.299f*rgba.x + 0.587f*rgba.y + 0.114f*rgba.z);
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
  //here a 32x32 thread block is used, with enough blocks in each dimension to cover the image
int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
int grid_x_dim = numRows/blockWidth+1;
int grid_y_dim = numCols/blockWidth+1;
const dim3 gridSize(grid_x_dim, grid_y_dim, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
dea2c1a31a082be6ed00f2c9868d7373b304a401.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaInitStatus.h"
#include "CudaMesh.h"
#include "CudaAnimation.h"
#include <time.h>
void initTriTetQuality(
RealD& t_pointlist,
PointTypeD& t_pointtypelist,
RealD& t_weightlist,
IntD& t_trifacelist,
RealD& t_trifacecent,
TriStatusD& t_tristatus,
IntD& t_tetlist,
TetStatusD& t_tetstatus,
MESHCR* criteria,
int numoftrifaces,
int numoftets
)
{
// Init triface quality
int numberofblocks = (ceil)((float)numoftrifaces / BLOCK_SIZE);
kernelInitTriQuality << <numberofblocks, BLOCK_SIZE >> >(
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_pointtypelist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_trifacelist[0]),
thrust::raw_pointer_cast(&t_trifacecent[0]),
thrust::raw_pointer_cast(&t_tristatus[0]),
criteria->facet_angle,
criteria->facet_size,
criteria->facet_distance,
numoftrifaces
);
// Init tet quality
numberofblocks = (ceil)((float)numoftets / BLOCK_SIZE);
kernelInitTetQuality << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
criteria->cell_radius_edge_ratio,
criteria->cell_size,
numoftets
);
}
// Init triface and its neighbors
void initTristatus(
IntD& t_aabbnodeleft,
IntD& t_aabbnoderight,
RealD& t_aabbnodebbs,
RealD& t_aabbpmcoord,
RealD& t_aabbpmbbs,
RealD& t_pointlist,
PointTypeD& t_pointtypelist,
RealD& t_weightlist,
IntD& t_trifacelist,
RealD& t_trifacecent,
TriStatusD& t_tristatus,
TetHandleD& t_tri2tetlist,
IntD& t_tetlist,
TriHandleD& t_tet2trilist,
TetHandleD& t_neighborlist,
MESHCR* criteria,
int& numoftrifaces,
int numoftets
)
{
#ifdef GQM3D_INIT_PROFILING
clock_t tv[2];
tv[0] = clock();
#endif
IntD trifacecount(4 * numoftets, 0);
IntD trifaceindices(4 * numoftets, 0);
RealD trifaceipt(4 * 3 * numoftets);
#ifdef GQM3D_INIT_PROFILING
hipDeviceSynchronize();
tv[1] = clock();
printf(" initTristatus vector initialization time = %f\n", (REAL)(tv[1] - tv[0]));
tv[0] = tv[1];
#endif
// Mark initial trifaces
int numberofblocks = (ceil)((float)numoftets / BLOCK_SIZE);
kernelMarkInitialTrifaces << <numberofblocks, BLOCK_SIZE >> >(
thrust::raw_pointer_cast(&t_aabbnodeleft[0]),
thrust::raw_pointer_cast(&t_aabbnoderight[0]),
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&trifacecount[0]),
thrust::raw_pointer_cast(&trifaceipt[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_neighborlist[0]),
numoftets
);
#ifdef GQM3D_INIT_PROFILING
hipDeviceSynchronize();
tv[1] = clock();
printf(" kernelMarkInitialTrifaces time = %f\n", (REAL)(tv[1] - tv[0]));
tv[0] = tv[1];
#endif
// Set up memory
numoftrifaces = thrust::count(trifacecount.begin(), trifacecount.end(), 1);
printf(" Number of trifaces = %d\n", numoftrifaces);
t_trifacelist.resize(3 * numoftrifaces);
t_trifacecent.resize(3 * numoftrifaces);
t_tristatus.resize(numoftrifaces, tristatus(1));
t_tri2tetlist.resize(2 * numoftrifaces);
thrust::exclusive_scan(trifacecount.begin(), trifacecount.end(), trifaceindices.begin());
#ifdef GQM3D_INIT_PROFILING
hipDeviceSynchronize();
tv[1] = clock();
printf(" initTristatus prepare vector time = %f\n", (REAL)(tv[1] - tv[0]));
tv[0] = tv[1];
#endif
// Append triface lists
kernelAppendInitialTrifaces << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&trifacecount[0]),
thrust::raw_pointer_cast(&trifaceindices[0]),
thrust::raw_pointer_cast(&trifaceipt[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_pointtypelist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_trifacelist[0]),
thrust::raw_pointer_cast(&t_trifacecent[0]),
thrust::raw_pointer_cast(&t_tristatus[0]),
thrust::raw_pointer_cast(&t_tri2tetlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_neighborlist[0]),
thrust::raw_pointer_cast(&t_tet2trilist[0]),
criteria->facet_angle,
criteria->facet_size,
criteria->facet_distance,
numoftets
);
#ifdef GQM3D_INIT_PROFILING
hipDeviceSynchronize();
tv[1] = clock();
printf(" kernelAppendInitialTrifaces time = %f\n", (REAL)(tv[1] - tv[0]));
#endif
int numofbadfacet = thrust::count_if(t_tristatus.begin(), t_tristatus.end(), isBadTri());
printf(" Number of bad trifaces = %d\n", numofbadfacet);
}
void initTetstatus(
IntD& t_aabbnodeleft,
IntD& t_aabbnoderight,
RealD& t_aabbnodebbs,
RealD& t_aabbpmcoord,
RealD& t_aabbpmbbs,
RealD& t_pointlist,
RealD& t_weightlist,
IntD& t_tetlist,
TetStatusD& t_tetstatus,
MESHCR* criteria,
REAL aabb_diglen,
int numoftets
)
{
#ifdef GQM3D_INIT_PROFILING
clock_t tv[2];
tv[0] = clock();
#endif
// Init tet status
int numberofblocks = (ceil)((float)numoftets / BLOCK_SIZE);
kernelInitTetStatus << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodeleft[0]),
thrust::raw_pointer_cast(&t_aabbnoderight[0]),
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
criteria->cell_radius_edge_ratio,
criteria->cell_size,
aabb_diglen,
numoftets
);
#ifdef GQM3D_INIT_PROFILING
hipDeviceSynchronize();
tv[1] = clock();
printf(" kernelInitTetStatus time = %f\n", (REAL)(tv[1] - tv[0]));
int numofcells = thrust::count_if(t_tetstatus.begin(), t_tetstatus.end(), isTetInDomain());
printf(" Number of tets in domain = %d\n", numofcells);
int numofbadcells = thrust::count_if(t_tetstatus.begin(), t_tetstatus.end(), isBadTet());
printf(" Number of bad tets in domain = %d\n", numofbadcells);
#endif
}
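// Variant of initTetstatus that performs the tet in/out-of-domain test in
// fixed-size windows and halves the window whenever the AABB-tree traversal
// would outgrow the reserved handle buffers, so large meshes fit in GPU memory.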
void initTetstatus2(
IntD& t_aabbnodeleft,
IntD& t_aabbnoderight,
RealD& t_aabbnodebbs,
RealD& t_aabbpmcoord,
RealD& t_aabbpmbbs,
RealD& t_pointlist,
RealD& t_weightlist,
IntD& t_tetlist,
TetStatusD& t_tetstatus,
MESHCR* criteria,
MESHIO* inputmesh,
int numoftets
)
{
//gpuMemoryCheck();
#ifdef GQM3D_INIT_PROFILING
clock_t tv[2];
tv[0] = clock();
#endif
int curhandlesize;
TetHandleD::iterator last_iterator;
IntD t_domaincount(numoftets, 0);// counter for in/out domain test
TetHandleD t_domainhandle;
RealD t_domainsegment;
IntD t_domainthreadlist;
IntD t_domainnode;
int numberofthreads;
int numberofblocks;
int basenum = 256;
int winsize = 3000 * basenum;
double free_mb;
getFreeMemory(free_mb);
#ifdef GQM3D_INIT_DEBUG
printf("free_mb = %lf\n", free_mb);
#endif
int maxreserve = 3 * 100 * 1000 * 1000;
int reservesize = free_mb * 1024 * 1024 / 3 / sizeof(tethandle);
if (reservesize > maxreserve)
reservesize = maxreserve;
#ifdef GQM3D_INIT_DEBUG
printf("reservesize = %d\n", reservesize);
#endif
try
{
t_domainhandle.reserve(reservesize);
t_domainnode.reserve(reservesize);
}
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
}
//gpuMemoryCheck();
int offset = 0;
while (true)
{
numberofthreads = numoftets - offset;
if (numberofthreads > winsize)
numberofthreads = winsize;
else if (numberofthreads <= 0)
break;
#ifdef GQM3D_INIT_DEBUG
printf("numberofthreads = %d, offset = %d, winsize = %d\n", numberofthreads, offset, winsize);
#endif
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
t_domainhandle.resize(numberofthreads);
thrust::fill(t_domainhandle.begin(), t_domainhandle.begin() + numberofthreads, tethandle(-1, 11));
kernelInitDomainHandle_Tet << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
offset,
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
last_iterator =
thrust::remove_if(t_domainhandle.begin(), t_domainhandle.begin() + numberofthreads, isInvalidTetHandle());
curhandlesize = thrust::distance(t_domainhandle.begin(), last_iterator);
#ifdef GQM3D_INIT_DEBUG
printf("curhandlesize = %d\n", curhandlesize);
#endif
try
{
t_domainsegment.resize(6 * curhandlesize);
			t_domainthreadlist.resize(curhandlesize); // thread index list used to store new tet thread indices
}
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
}
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelInitDomainSegment_Tet << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainsegment[0]),
thrust::raw_pointer_cast(&t_domainthreadlist[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
inputmesh->aabb_diglen,
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
last_iterator = // remove degenerate cases and fast check cases
thrust::remove_if(t_domainhandle.begin(), t_domainhandle.begin() + numberofthreads, isInvalidTetHandle());
curhandlesize = thrust::distance(t_domainhandle.begin(), last_iterator);
#ifdef GQM3D_INIT_DEBUG
printf("curhandlesize = %d\n", curhandlesize);
#endif
if (curhandlesize == 0)
return;
t_domainnode.resize(t_domainhandle.size());
thrust::fill(t_domainnode.begin(), t_domainnode.begin() + curhandlesize, 1);
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
int numofemptyhandleslot;
int domainexpanditer = 0;
bool halfwinsize = false;
while (true)
{
#ifdef GQM3D_INIT_DEBUG
printf("Domain search iteration = %d, curhandlesize = %d\n", domainexpanditer, curhandlesize);
#endif
kernelDomainSegmentAndBoxCheck << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainnode[0]),
thrust::raw_pointer_cast(&t_domainsegment[0]),
numberofthreads
);
// remove the handles that do not intersect with node bounding boxes
typedef thrust::zip_iterator<thrust::tuple<TetHandleD::iterator, IntD::iterator>> ZipIterator;
ZipIterator first_iterator = thrust::make_zip_iterator(thrust::make_tuple(t_domainhandle.begin(), t_domainnode.begin()));
auto last_iterator =
thrust::remove_if(first_iterator,
thrust::make_zip_iterator(thrust::make_tuple(t_domainhandle.begin() + numberofthreads, t_domainnode.begin() + numberofthreads)),
isInvalidDomainTuple());
curhandlesize = thrust::distance(first_iterator, last_iterator);
if (curhandlesize == 0)
break;
if (domainexpanditer == inputmesh->aabb_level)
break;
// prepare enough space for new handles and nodes
numofemptyhandleslot = t_domainhandle.size() - curhandlesize;
if (numofemptyhandleslot < curhandlesize)
{
try
{
if (2 * curhandlesize > reservesize) // possible to run out of memory
{
// half the window size
#ifdef GQM3D_INIT_DEBUG
printf("half the window size\n");
#endif
winsize /= 2;
halfwinsize = true;
break;
}
t_domainhandle.resize(2 * curhandlesize);
t_domainnode.resize(2 * curhandlesize);
}
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
}
}
thrust::fill(t_domainhandle.begin() + curhandlesize, t_domainhandle.begin() + 2 * curhandlesize, tethandle(-1, 11));
thrust::fill(t_domainnode.begin() + curhandlesize, t_domainnode.begin() + 2 * curhandlesize, 0);
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelDomainHandleAppend << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodeleft[0]),
thrust::raw_pointer_cast(&t_aabbnoderight[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainnode[0]),
numberofthreads
);
curhandlesize = 2 * curhandlesize;
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
domainexpanditer++;
}
if (halfwinsize)
continue;
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelDomainSegmentAndPrimitiveCheck_Tet << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainnode[0]),
thrust::raw_pointer_cast(&t_domainsegment[0]),
thrust::raw_pointer_cast(&t_domaincount[0]),
thrust::raw_pointer_cast(&t_domainthreadlist[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
numberofthreads = numoftets - offset;
if (numberofthreads > winsize)
numberofthreads = winsize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelSetTetStatus << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_domaincount[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
criteria->cell_radius_edge_ratio,
criteria->cell_size,
offset,
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
offset += winsize;
}
#ifdef GQM3D_INIT_PROFILING
hipDeviceSynchronize();
tv[1] = clock();
printf(" Tet in/out domain test time = %f\n", (REAL)(tv[1] - tv[0]));
#endif
} | dea2c1a31a082be6ed00f2c9868d7373b304a401.cu | #include "CudaInitStatus.h"
#include "CudaMesh.h"
#include "CudaAnimation.h"
#include <time.h>
void initTriTetQuality(
RealD& t_pointlist,
PointTypeD& t_pointtypelist,
RealD& t_weightlist,
IntD& t_trifacelist,
RealD& t_trifacecent,
TriStatusD& t_tristatus,
IntD& t_tetlist,
TetStatusD& t_tetstatus,
MESHCR* criteria,
int numoftrifaces,
int numoftets
)
{
// Init triface quality
int numberofblocks = (ceil)((float)numoftrifaces / BLOCK_SIZE);
kernelInitTriQuality << <numberofblocks, BLOCK_SIZE >> >(
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_pointtypelist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_trifacelist[0]),
thrust::raw_pointer_cast(&t_trifacecent[0]),
thrust::raw_pointer_cast(&t_tristatus[0]),
criteria->facet_angle,
criteria->facet_size,
criteria->facet_distance,
numoftrifaces
);
// Init tet quality
numberofblocks = (ceil)((float)numoftets / BLOCK_SIZE);
kernelInitTetQuality << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
criteria->cell_radius_edge_ratio,
criteria->cell_size,
numoftets
);
}
// Init triface and its neighbors
void initTristatus(
IntD& t_aabbnodeleft,
IntD& t_aabbnoderight,
RealD& t_aabbnodebbs,
RealD& t_aabbpmcoord,
RealD& t_aabbpmbbs,
RealD& t_pointlist,
PointTypeD& t_pointtypelist,
RealD& t_weightlist,
IntD& t_trifacelist,
RealD& t_trifacecent,
TriStatusD& t_tristatus,
TetHandleD& t_tri2tetlist,
IntD& t_tetlist,
TriHandleD& t_tet2trilist,
TetHandleD& t_neighborlist,
MESHCR* criteria,
int& numoftrifaces,
int numoftets
)
{
#ifdef GQM3D_INIT_PROFILING
clock_t tv[2];
tv[0] = clock();
#endif
IntD trifacecount(4 * numoftets, 0);
IntD trifaceindices(4 * numoftets, 0);
RealD trifaceipt(4 * 3 * numoftets);
#ifdef GQM3D_INIT_PROFILING
cudaDeviceSynchronize();
tv[1] = clock();
printf(" initTristatus vector initialization time = %f\n", (REAL)(tv[1] - tv[0]));
tv[0] = tv[1];
#endif
// Mark initial trifaces
int numberofblocks = (ceil)((float)numoftets / BLOCK_SIZE);
kernelMarkInitialTrifaces << <numberofblocks, BLOCK_SIZE >> >(
thrust::raw_pointer_cast(&t_aabbnodeleft[0]),
thrust::raw_pointer_cast(&t_aabbnoderight[0]),
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&trifacecount[0]),
thrust::raw_pointer_cast(&trifaceipt[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_neighborlist[0]),
numoftets
);
#ifdef GQM3D_INIT_PROFILING
cudaDeviceSynchronize();
tv[1] = clock();
printf(" kernelMarkInitialTrifaces time = %f\n", (REAL)(tv[1] - tv[0]));
tv[0] = tv[1];
#endif
// Set up memory
numoftrifaces = thrust::count(trifacecount.begin(), trifacecount.end(), 1);
printf(" Number of trifaces = %d\n", numoftrifaces);
t_trifacelist.resize(3 * numoftrifaces);
t_trifacecent.resize(3 * numoftrifaces);
t_tristatus.resize(numoftrifaces, tristatus(1));
t_tri2tetlist.resize(2 * numoftrifaces);
thrust::exclusive_scan(trifacecount.begin(), trifacecount.end(), trifaceindices.begin());
#ifdef GQM3D_INIT_PROFILING
cudaDeviceSynchronize();
tv[1] = clock();
printf(" initTristatus prepare vector time = %f\n", (REAL)(tv[1] - tv[0]));
tv[0] = tv[1];
#endif
// Append triface lists
kernelAppendInitialTrifaces << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&trifacecount[0]),
thrust::raw_pointer_cast(&trifaceindices[0]),
thrust::raw_pointer_cast(&trifaceipt[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_pointtypelist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_trifacelist[0]),
thrust::raw_pointer_cast(&t_trifacecent[0]),
thrust::raw_pointer_cast(&t_tristatus[0]),
thrust::raw_pointer_cast(&t_tri2tetlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_neighborlist[0]),
thrust::raw_pointer_cast(&t_tet2trilist[0]),
criteria->facet_angle,
criteria->facet_size,
criteria->facet_distance,
numoftets
);
#ifdef GQM3D_INIT_PROFILING
cudaDeviceSynchronize();
tv[1] = clock();
printf(" kernelAppendInitialTrifaces time = %f\n", (REAL)(tv[1] - tv[0]));
#endif
int numofbadfacet = thrust::count_if(t_tristatus.begin(), t_tristatus.end(), isBadTri());
printf(" Number of bad trifaces = %d\n", numofbadfacet);
}
void initTetstatus(
IntD& t_aabbnodeleft,
IntD& t_aabbnoderight,
RealD& t_aabbnodebbs,
RealD& t_aabbpmcoord,
RealD& t_aabbpmbbs,
RealD& t_pointlist,
RealD& t_weightlist,
IntD& t_tetlist,
TetStatusD& t_tetstatus,
MESHCR* criteria,
REAL aabb_diglen,
int numoftets
)
{
#ifdef GQM3D_INIT_PROFILING
clock_t tv[2];
tv[0] = clock();
#endif
// Init tet status
int numberofblocks = (ceil)((float)numoftets / BLOCK_SIZE);
kernelInitTetStatus << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodeleft[0]),
thrust::raw_pointer_cast(&t_aabbnoderight[0]),
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
criteria->cell_radius_edge_ratio,
criteria->cell_size,
aabb_diglen,
numoftets
);
#ifdef GQM3D_INIT_PROFILING
cudaDeviceSynchronize();
tv[1] = clock();
printf(" kernelInitTetStatus time = %f\n", (REAL)(tv[1] - tv[0]));
int numofcells = thrust::count_if(t_tetstatus.begin(), t_tetstatus.end(), isTetInDomain());
printf(" Number of tets in domain = %d\n", numofcells);
int numofbadcells = thrust::count_if(t_tetstatus.begin(), t_tetstatus.end(), isBadTet());
printf(" Number of bad tets in domain = %d\n", numofbadcells);
#endif
}
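// Variant of initTetstatus that performs the tet in/out-of-domain test in
// fixed-size windows and halves the window whenever the AABB-tree traversal
// would outgrow the reserved handle buffers, so large meshes fit in GPU memory.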
void initTetstatus2(
IntD& t_aabbnodeleft,
IntD& t_aabbnoderight,
RealD& t_aabbnodebbs,
RealD& t_aabbpmcoord,
RealD& t_aabbpmbbs,
RealD& t_pointlist,
RealD& t_weightlist,
IntD& t_tetlist,
TetStatusD& t_tetstatus,
MESHCR* criteria,
MESHIO* inputmesh,
int numoftets
)
{
//gpuMemoryCheck();
#ifdef GQM3D_INIT_PROFILING
clock_t tv[2];
tv[0] = clock();
#endif
int curhandlesize;
TetHandleD::iterator last_iterator;
IntD t_domaincount(numoftets, 0);// counter for in/out domain test
TetHandleD t_domainhandle;
RealD t_domainsegment;
IntD t_domainthreadlist;
IntD t_domainnode;
int numberofthreads;
int numberofblocks;
int basenum = 256;
int winsize = 3000 * basenum;
double free_mb;
getFreeMemory(free_mb);
#ifdef GQM3D_INIT_DEBUG
printf("free_mb = %lf\n", free_mb);
#endif
int maxreserve = 3 * 100 * 1000 * 1000;
int reservesize = free_mb * 1024 * 1024 / 3 / sizeof(tethandle);
if (reservesize > maxreserve)
reservesize = maxreserve;
#ifdef GQM3D_INIT_DEBUG
printf("reservesize = %d\n", reservesize);
#endif
try
{
t_domainhandle.reserve(reservesize);
t_domainnode.reserve(reservesize);
}
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
}
//gpuMemoryCheck();
int offset = 0;
while (true)
{
numberofthreads = numoftets - offset;
if (numberofthreads > winsize)
numberofthreads = winsize;
else if (numberofthreads <= 0)
break;
#ifdef GQM3D_INIT_DEBUG
printf("numberofthreads = %d, offset = %d, winsize = %d\n", numberofthreads, offset, winsize);
#endif
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
t_domainhandle.resize(numberofthreads);
thrust::fill(t_domainhandle.begin(), t_domainhandle.begin() + numberofthreads, tethandle(-1, 11));
kernelInitDomainHandle_Tet << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
offset,
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
last_iterator =
thrust::remove_if(t_domainhandle.begin(), t_domainhandle.begin() + numberofthreads, isInvalidTetHandle());
curhandlesize = thrust::distance(t_domainhandle.begin(), last_iterator);
#ifdef GQM3D_INIT_DEBUG
printf("curhandlesize = %d\n", curhandlesize);
#endif
try
{
t_domainsegment.resize(6 * curhandlesize);
			t_domainthreadlist.resize(curhandlesize); // thread index list used to store new tet thread indices
}
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
}
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelInitDomainSegment_Tet << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainsegment[0]),
thrust::raw_pointer_cast(&t_domainthreadlist[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
inputmesh->aabb_diglen,
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
last_iterator = // remove degenerate cases and fast check cases
thrust::remove_if(t_domainhandle.begin(), t_domainhandle.begin() + numberofthreads, isInvalidTetHandle());
curhandlesize = thrust::distance(t_domainhandle.begin(), last_iterator);
#ifdef GQM3D_INIT_DEBUG
printf("curhandlesize = %d\n", curhandlesize);
#endif
if (curhandlesize == 0)
return;
t_domainnode.resize(t_domainhandle.size());
thrust::fill(t_domainnode.begin(), t_domainnode.begin() + curhandlesize, 1);
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
int numofemptyhandleslot;
int domainexpanditer = 0;
bool halfwinsize = false;
while (true)
{
#ifdef GQM3D_INIT_DEBUG
printf("Domain search iteration = %d, curhandlesize = %d\n", domainexpanditer, curhandlesize);
#endif
kernelDomainSegmentAndBoxCheck << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodebbs[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainnode[0]),
thrust::raw_pointer_cast(&t_domainsegment[0]),
numberofthreads
);
// remove the handles that do not intersect with node bounding boxes
typedef thrust::zip_iterator<thrust::tuple<TetHandleD::iterator, IntD::iterator>> ZipIterator;
ZipIterator first_iterator = thrust::make_zip_iterator(thrust::make_tuple(t_domainhandle.begin(), t_domainnode.begin()));
auto last_iterator =
thrust::remove_if(first_iterator,
thrust::make_zip_iterator(thrust::make_tuple(t_domainhandle.begin() + numberofthreads, t_domainnode.begin() + numberofthreads)),
isInvalidDomainTuple());
curhandlesize = thrust::distance(first_iterator, last_iterator);
if (curhandlesize == 0)
break;
if (domainexpanditer == inputmesh->aabb_level)
break;
// prepare enough space for new handles and nodes
numofemptyhandleslot = t_domainhandle.size() - curhandlesize;
if (numofemptyhandleslot < curhandlesize)
{
try
{
if (2 * curhandlesize > reservesize) // possible to run out of memory
{
// half the window size
#ifdef GQM3D_INIT_DEBUG
printf("half the window size\n");
#endif
winsize /= 2;
halfwinsize = true;
break;
}
t_domainhandle.resize(2 * curhandlesize);
t_domainnode.resize(2 * curhandlesize);
}
catch (thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Error: " << e.what() << std::endl;
exit(-1);
}
}
thrust::fill(t_domainhandle.begin() + curhandlesize, t_domainhandle.begin() + 2 * curhandlesize, tethandle(-1, 11));
thrust::fill(t_domainnode.begin() + curhandlesize, t_domainnode.begin() + 2 * curhandlesize, 0);
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelDomainHandleAppend << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbnodeleft[0]),
thrust::raw_pointer_cast(&t_aabbnoderight[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainnode[0]),
numberofthreads
);
curhandlesize = 2 * curhandlesize;
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
domainexpanditer++;
}
if (halfwinsize)
continue;
numberofthreads = curhandlesize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelDomainSegmentAndPrimitiveCheck_Tet << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_aabbpmcoord[0]),
thrust::raw_pointer_cast(&t_aabbpmbbs[0]),
thrust::raw_pointer_cast(&t_domainhandle[0]),
thrust::raw_pointer_cast(&t_domainnode[0]),
thrust::raw_pointer_cast(&t_domainsegment[0]),
thrust::raw_pointer_cast(&t_domaincount[0]),
thrust::raw_pointer_cast(&t_domainthreadlist[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
numberofthreads = numoftets - offset;
if (numberofthreads > winsize)
numberofthreads = winsize;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelSetTetStatus << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_domaincount[0]),
thrust::raw_pointer_cast(&t_pointlist[0]),
thrust::raw_pointer_cast(&t_weightlist[0]),
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
criteria->cell_radius_edge_ratio,
criteria->cell_size,
offset,
numberofthreads
);
#ifdef GQM3D_INIT_DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
offset += winsize;
}
#ifdef GQM3D_INIT_PROFILING
cudaDeviceSynchronize();
tv[1] = clock();
printf(" Tet in/out domain test time = %f\n", (REAL)(tv[1] - tv[0]));
#endif
} |
b7ca86e7666589c17b16783cdeaed19b833262f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ScanArray.cu
// Implements parallel scan (prefix sum / prefix "op") over an array on the GPU.
#include "ScanArray.h"
#include "OperationFunctor.h"
#include <iostream>
#include <stdio.h>
using namespace std;
#define BLOCK_SIZE 1024
// Macro: NUM_BANKS
// Number of shared memory banks.
#define NUM_BANKS 16
// Macro: LOG_NUM_BANKS
// Base-2 logarithm of the number of shared memory banks.
#define LOG_NUM_BANKS 4
// Kernel: _scanNaiveKer (naive scan)
// Scans each block in log(n) steps but performs n * (log(n) - 1) operations
// and uses 2 * n elements of shared memory (double buffering), so it is not
// work-efficient.
template < class T, class Operation >
__global__ void       // Kernel functions return no value.
_scanNaiveKer(
    T *outarray,      // Output array.
    T *inarray,       // Input array.
    T *blocksum,      // Per-block partial results (the last scanned element of
                      // each block), used later to stitch the blocks together.
    int n,            // Number of elements to scan.
    Operation op,     // Binary operator functor applied by the scan.
    bool backward     // Scan direction: false = forward, true = backward.
);
// Kernel: _scanWorkEfficientKer (work-efficient scan)
// Scans each block in O(log(n)) steps and O(n) operations using a balanced
// tree built in shared memory (up-sweep followed by down-sweep).
// References: Blelloch 1990, "Prefix Sums and Their Applications",
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// and the Prins and Chatterjee PRAM course notes,
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void       // Kernel functions return no value.
_scanWorkEfficientKer(
    T *outarray,      // Output array.
    T *inarray,       // Input array.
    T *blocksum,      // Per-block partial results used later to stitch the
                      // blocks together.
    int n,            // Number of elements to scan.
    Operation op,     // Binary operator functor applied by the scan.
    bool backward     // Scan direction: false = forward, true = backward.
);
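// Worked example for a forward sum scan of one block of {3, 1, 7, 0}:
// the up-sweep gives {3, 4, 7, 11}; clearing the last element gives {3, 4, 7, 0};
// the down-sweep then produces the exclusive scan {0, 3, 4, 11}, which the
// kernel shifts (combining the final input element) into the inclusive scan
// {3, 4, 11, 11} when writing the output.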
// Kernel: _scanOptKer (optimized work-efficient scan)
// Scans each block in O(log(n)) steps and O(n) operations, again using the
// balanced-tree up-sweep / down-sweep in shared memory.
// References: Blelloch 1990, "Prefix Sums and Their Applications",
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// and the Prins and Chatterjee PRAM course notes,
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void       // Kernel functions return no value.
_scanOptKer(
    T *outarray,      // Output array.
    T *inarray,       // Input array.
    T *blocksum,      // Per-block partial results used later to stitch the
                      // blocks together.
    int n,            // Number of elements to scan.
    Operation op,     // Binary operator functor applied by the scan.
    bool backward     // Scan direction: false = forward, true = backward.
);
// Kernel: _scanBetterKer (work-efficient scan avoiding bank conflicts)
// Scans each block in O(log(n)) steps and O(n) operations; shared memory is
// padded to n + n / NUM_BANKS elements so that the balanced-tree accesses
// avoid shared memory bank conflicts.
// References: Blelloch 1990, "Prefix Sums and Their Applications",
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// and the Prins and Chatterjee PRAM course notes,
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void       // Kernel functions return no value.
_scanBetterKer(
    T *outarray,      // Output array.
    T *inarray,       // Input array.
    T *blocksum,      // Per-block partial results used later to stitch the
                      // blocks together.
    int n,            // Number of elements to scan.
    Operation op,     // Binary operator functor applied by the scan.
    bool backward     // Scan direction: false = forward, true = backward.
);
// Host function: scanComputeGold
// CPU reference implementation of the inclusive scan, used to verify the
// results computed on the GPU.
template < class T, class Operation >
__host__ int          // Return value: error code of the function; NO_ERROR
                      // is returned on success.
scanComputeGold(
    T *inarray,       // Input array.
    T *reference,     // Output array receiving the CPU scan result.
    const unsigned int len,  // Number of elements to scan.
    Operation op,     // Binary operator functor applied by the scan.
    bool backward     // Scan direction: false = forward, true = backward.
);
// Kernel: _addBackKer
// Adds the scanned per-block results back onto the elements of the
// corresponding blocks, turning the independent per-block scans into a scan
// of the whole array.
template < class T, class Operation >
__global__ void       // Kernel functions return no value.
_addBackKer(
    T *array,         // Array updated in place.
    T *lastelemarray, // Scanned array of per-block last elements (block
                      // results) that get added back.
    int n,            // Number of elements in the array.
    int packnum,      // Packing factor mapping array elements to entries of
                      // the block-result array.
    Operation op,     // Binary operator functor.
    bool backward     // Scan direction: false = forward, true = backward.
);
// Kernel : _scanNaiveKer
template < class T, class Operation >
__global__ void _scanNaiveKer(T *outarray, T *inarray, T *blocksum, int n,
Operation op, bool backward)
{
    // Dynamically sized shared memory for this block (declared as bytes).
    extern __shared__ unsigned char sharedmemo[];
    // Reinterpret the shared memory as elements of type T.
    T *sharedmem = (T *)sharedmemo;
    // Global index of this thread, derived from blockIdx.x and threadIdx.x.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Double-buffering pointers: pout is the buffer written in the current
    // step, pin is the buffer produced by the previous step.
    T *pout = sharedmem;
    T *pin = sharedmem + blockDim.x;
    // Load the input element, or the operator's identity for out-of-range
    // threads.
    if (idx < n)
        pout[threadIdx.x] = inarray[idx];
    else
        pout[threadIdx.x] = op.identity();
    // Perform the naive scan: in each step every element combines itself
    // with the element 'offset' positions away, doubling 'offset' each time.
    // The forward and backward directions are handled by separate branches.
if (!backward) {
for (int offset = 1; offset < blockDim.x; offset *= 2) {
            // Swap the pout and pin pointers so that the result of the
            // previous step becomes the input of this step.
            T *ptemp;
            ptemp = pout;
            pout = pin;
            pin = ptemp;
            __syncthreads();
            // Copy the previous value first (double buffering), so threads
            // that do not combine anything still carry their value forward.
            pout[threadIdx.x] = pin[threadIdx.x];
            // Threads far enough from the block start combine their value
            // with the element 'offset' positions to the left.
if (threadIdx.x >= offset)
pout[threadIdx.x] = op(pout[threadIdx.x],
pin[threadIdx.x - offset]);
}
//
__syncthreads();
// n
if (idx >= n)
return;
//
outarray[idx] = pout[threadIdx.x];
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = pout[threadIdx.x];
}
} else {
for (int offset = 1; offset < blockDim.x; offset *= 2) {
// pout pin
//
T *ptemp;
ptemp = pout;
pout = pin;
pin = ptemp;
__syncthreads();
// scan
// double buffer
pout[threadIdx.x] = pin[threadIdx.x];
//
//
if (threadIdx.x + offset < blockDim.x)
pout[threadIdx.x] = op(pin[threadIdx.x],
pin[threadIdx.x + offset]);
}
//
__syncthreads();
// n
if (idx >= n)
return;
//
outarray[idx] = pout[threadIdx.x];
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == 0 && blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = pout[threadIdx.x];
}
}
}
// Kernel : _scanWorkEfficientKer
template < class T, class Operation >
__global__ void _scanWorkEfficientKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op,
bool backward)
{
//
extern __shared__ unsigned char sharedmemo[];
//
T *sharedmem = (T *)sharedmemo;
//
int baseidx = 2 * blockIdx.x * blockDim.x;
int inidx = 2 * threadIdx.x;
int idx = baseidx + inidx;
// offset
int offset = 1;
//
int length = blockDim.x * 2;
//
sharedmem[inidx] = (idx < n) ? inarray[idx] : op.identity();
sharedmem[inidx + 1] = (idx + 1 < n) ? inarray[idx + 1] : op.identity();
// scan
if (!backward) {
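// Up-sweep (reduce) phase: combine pairs of elements in place while the
// stride doubles, so the last shared-memory slot ends up holding the
// block-wide total.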
for (int d = blockDim.x; d > 0; d >>= 1) {
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * (inidx + 1) - 1;
int bi = offset * (inidx + 2) - 1;
// scan
//
sharedmem[bi] = op(sharedmem[bi], sharedmem[ai]);
}
// 2
offset *= 2;
}
//
if (threadIdx.x == 0) {
//
sharedmem[length - 1] = op.identity();
}
// scan
//
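// Down-sweep phase: walk back down the tree, swapping and combining partial
// results so that each slot receives the exclusive prefix of its block.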
for (int d = 1; d < length; d *= 2) {
// 2
offset >>= 1;
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * (inidx + 1) - 1;
int bi = offset * (inidx + 2) - 1;
// ai bi ai bi
T t = sharedmem[ai];
sharedmem[ai] = sharedmem[bi];
sharedmem[bi] = op(sharedmem[bi], t);
}
}
//
__syncthreads();
// n
if (idx >= n)
return;
//
outarray[idx] = sharedmem[inidx + 1];
// n
if (idx + 1 >= n)
return;
//
if ((inidx + 1) == (2 * blockDim.x - 1))
//
outarray[idx + 1] = op(sharedmem[inidx + 1], inarray[idx + 1]);
else
//
outarray[idx + 1] = sharedmem[inidx + 2];
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[idx + 1];
}
} else {
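// Backward scan mirrors the forward pass: the up-sweep accumulates toward
// index 0 and the down-sweep distributes exclusive suffix results instead
// of prefixes.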
for (int d = blockDim.x; d > 0; d >>= 1) {
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * inidx;
int bi = offset * (inidx + 1);
// scan
//
sharedmem[ai] = op(sharedmem[bi], sharedmem[ai]);
}
// 2
offset *= 2;
}
//
if (threadIdx.x == 0) {
//
sharedmem[0] = op.identity();
}
// scan
//
for (int d = 1; d < length; d *= 2) {
// 2
offset >>= 1;
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * inidx;
int bi = offset * (inidx + 1);
// bi ai bi ai
T t = sharedmem[bi];
sharedmem[bi] = sharedmem[ai];
sharedmem[ai] = op(sharedmem[ai], t);
}
}
//
__syncthreads();
// n
if (idx >= n)
return;
//
if (inidx == 0)
//
outarray[idx] = op(sharedmem[inidx], inarray[idx]);
else
//
outarray[idx] = sharedmem[inidx - 1];
// n
if (idx + 1 < n) {
//
outarray[idx + 1] = sharedmem[inidx];
}
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[idx];
}
}
}
// Kernel : _scanOptKer
template < class T, class Operation >
__global__ void _scanOptKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op, bool backward)
{
//
extern __shared__ unsigned char sharedmemo[];
//
T *sharedmem = (T *)sharedmemo;
//
int baseidx = 2 * blockIdx.x * blockDim.x;
//
int length = blockDim.x * 2;
//
int ai = threadIdx.x;
int bi = threadIdx.x + blockDim.x;
//
int aindex = baseidx + ai;
int bindex = baseidx + bi;
//
sharedmem[ai] = (aindex < n) ? inarray[aindex] : op.identity();
sharedmem[bi] = (bindex < n) ? inarray[bindex] : op.identity();
// offset
int offset = 1;
if (!backward) {
// scan
for (int d = blockDim.x; d > 0; d >>= 1) {
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * (2 * threadIdx.x + 1) - 1;
int bi = offset * (2 * threadIdx.x + 2) - 1;
// scan
//
sharedmem[bi] = op(sharedmem[bi], sharedmem[ai]);
}
// 2
offset *= 2;
}
//
if (threadIdx.x == 0) {
int index = length - 1;
//
sharedmem[index] = op.identity();
}
// scan
//
for (int d = 1; d < length; d *= 2) {
// 2
offset /= 2;
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * (2 * threadIdx.x + 1) - 1;
int bi = offset * (2 * threadIdx.x + 2) - 1;
// ai bi ai bi
T t = sharedmem[ai];
sharedmem[ai] = sharedmem[bi];
sharedmem[bi] = op(sharedmem[bi], t);
}
}
//
__syncthreads();
// n
if (aindex >= n)
return;
//
outarray[aindex] = sharedmem[ai + 1];
// n
if (bindex >= n)
return;
//
if (bi == (2 * blockDim.x - 1))
//
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
else
//
outarray[bindex] = sharedmem[bi + 1];
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[bindex];
}
} else {
// scan
for (int d = blockDim.x; d > 0; d >>= 1) {
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * 2 * threadIdx.x;
int bi = offset * (2 * threadIdx.x + 1);
// scan
//
sharedmem[ai] = op(sharedmem[bi], sharedmem[ai]);
}
// 2
offset *= 2;
}
//
if (threadIdx.x == 0) {
int index = 0;
//
sharedmem[index] = op.identity();
}
// scan
//
for (int d = 1; d < length; d *= 2) {
// 2
offset /= 2;
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ai = offset * 2 * threadIdx.x;
int bi = offset * (2 * threadIdx.x + 1);
// bi ai bi ai
T t = sharedmem[bi];
sharedmem[bi] = sharedmem[ai];
sharedmem[ai] = op(sharedmem[ai], t);
}
}
//
__syncthreads();
// n
if (aindex >= n)
return;
//
if (ai == 0)
//
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
else
//
outarray[aindex] = sharedmem[ai - 1];
// n
if (bindex < n) {
//
outarray[bindex] = sharedmem[bi - 1];
}
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[aindex];
}
}
}
// Macro: CONFLICT_FREE_OFFSET
// Padding offset used to avoid shared-memory bank conflicts, even at the
// lower levels of the scan tree.
#ifdef ZERO_BANK_CONFLICTS
# define CONFLICT_FREE_OFFSET(index) \
(((index) >> LOG_NUM_BANKS) + ((index) >> (2 * LOG_NUM_BANKS)))
#else
# define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
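// Each group of NUM_BANKS consecutive indices is shifted by one extra slot,
// so the strided shared-memory accesses in the tree phases fall into
// distinct banks.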
// Kernel function: _scanBetterKer (improved version of array scan)
template < class T, class Operation >
__global__ void _scanBetterKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op, bool backward)
{
//
extern __shared__ unsigned char sharedmemo[];
//
T *sharedmem = (T *)sharedmemo;
//
int baseidx = 2 * blockIdx.x * blockDim.x;
int idx = threadIdx.x + blockDim.x;
//
int length = blockDim.x * 2;
// bankOffset
int ai = threadIdx.x + CONFLICT_FREE_OFFSET(threadIdx.x);
int bi = idx + CONFLICT_FREE_OFFSET(idx);
//
int aindex = baseidx + threadIdx.x;
int bindex = aindex + blockDim.x;
//
sharedmem[ai] = (aindex < n) ? inarray[aindex] : op.identity();
sharedmem[bi] = (bindex < n) ? inarray[bindex] : op.identity();
// offset
int offset = 1;
if (!backward) {
// scan
for (int d = blockDim.x; d > 0; d >>= 1) {
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ci = offset * (2 * threadIdx.x + 1) - 1;
int di = offset * (2 * threadIdx.x + 2) - 1;
// bank conflicts
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// scan
//
sharedmem[di] = op(sharedmem[di], sharedmem[ci]);
}
// 2
offset *= 2;
}
//
if (threadIdx.x == 0) {
int index = length - 1;
// bank conflicts
index += CONFLICT_FREE_OFFSET(index);
//
sharedmem[index] = op.identity();
}
// scan
//
for (int d = 1; d < length; d *= 2) {
// 2
offset /= 2;
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ci = offset * (2 * threadIdx.x + 1) - 1;
int di = offset * (2 * threadIdx.x + 2) - 1;
// bank conflicts
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// ai bi ai bi
T t = sharedmem[ci];
sharedmem[ci] = sharedmem[di];
sharedmem[di] = op(sharedmem[di], t);
}
}
//
__syncthreads();
// n
if (aindex >= n)
return;
//
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
// n
if (bindex >= n)
return;
//
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[bindex];
}
} else {
// scan
for (int d = blockDim.x; d > 0; d >>= 1) {
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ci = offset * 2 * threadIdx.x;
int di = offset * (2 * threadIdx.x + 1);
// bank conflicts
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// scan
//
sharedmem[ci] = op(sharedmem[di], sharedmem[ci]);
}
// 2
offset *= 2;
}
//
if (threadIdx.x == 0) {
int index = 0;
// bank conflicts
index += CONFLICT_FREE_OFFSET(index);
//
sharedmem[index] = op.identity();
}
// scan
//
for (int d = 1; d < length; d *= 2) {
// 2
offset /= 2;
//
__syncthreads();
// d
if (threadIdx.x < d) {
//
int ci = offset * 2 * threadIdx.x;
int di = offset * (2 * threadIdx.x + 1);
// bank conflicts
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// di ci di ci
T t = sharedmem[di];
sharedmem[di] = sharedmem[ci];
sharedmem[ci] = op(sharedmem[ci], t);
}
}
//
__syncthreads();
// n
if (aindex >= n)
return;
//
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
// n
if (bindex < n) {
//
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
}
//
if (blocksum == NULL)
return;
//
//
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[baseidx];
}
}
}
// Function: scanComputeGold (CPU-side inclusive scan)
template < class T, class Operation >
__host__ int scanComputeGold(T *inarray, T *reference,
const unsigned int len, Operation op,
bool backward)
{
//
int i;
if (!backward) {
// inarray[0]
reference[0] = inarray[0];
for (i = 1; i < len; ++i) {
//
reference[i] = op(inarray[i], reference[i-1]);
}
} else {
// inarray[len - 1]
reference[len - 1] = inarray[len - 1];
for (i = len - 2; i >= 0; i--) {
//
reference[i] = op(inarray[i], reference[i+1]);
}
}
//
return NO_ERROR;
}
// Kernel function: _addBackKer (add back the intermediate results)
template < class T, class Operation >
__global__ void _addBackKer(T *array, T *lastelemarray, int n,
int packnum, Operation op, bool backward)
{
//
//
__shared__ T lastelement[1];
if (!backward) {
//
//
if (threadIdx.x == 0)
lastelement[0] = lastelemarray[blockIdx.x];
//
unsigned int idx = (blockIdx.x + 1) * (blockDim.x * packnum) +
threadIdx.x;
//
__syncthreads();
//
for (int i = 0; i < packnum; i++) {
//
if (idx >= n)
break;
//
array[idx] = op(array[idx],lastelement[0]);
//
idx += blockDim.x;
}
} else {
//
//
if (threadIdx.x == 0) {
lastelement[0] = lastelemarray[blockIdx.x];
}
//
unsigned int idx = blockIdx.x * (blockDim.x * packnum) + threadIdx.x;
//
__syncthreads();
//
for (int i = 0; i < packnum; i++) {
//
if (idx >= n)
break;
//
array[idx] = op(array[idx], lastelement[0]);
//
idx += blockDim.x;
}
}
}
// Host member method: addBack (add back intermediate results, float version)
template< class Operation >
__host__ int ScanArray::addBack(float *array, float *lastelemarray,
int numelements, int blocksize, int packnum,
Operation op, bool backward)
{
// NULL NULL
if (array == NULL || lastelemarray == NULL)
return NULL_POINTER;
// 0
if (numelements < 0)
return INVALID_DATA;
//
int gridsize = (numelements + blocksize * packnum - 1) /
(blocksize * packnum) - 1;
// gridsize 1
if (gridsize < 1)
return NO_ERROR;
// _addBackKer
hipLaunchKernelGGL(( _addBackKer), dim3(gridsize), dim3(blocksize), 0, 0, array, lastelemarray,
numelements, packnum, op, backward);
//
if (hipGetLastError() != hipSuccess)
return CUDA_ERROR;
//
return NO_ERROR;
}
// Host member method: addBack (add back intermediate results, int version)
template< class Operation >
__host__ int ScanArray::addBack(int *array, int *lastelemarray,
int numelements, int blocksize, int packnum,
Operation op, bool backward)
{
// NULL NULL
if (array == NULL || lastelemarray == NULL)
return NULL_POINTER;
// 0
if (numelements < 0)
return INVALID_DATA;
//
int gridsize = (numelements + blocksize * packnum - 1) /
(blocksize * packnum) - 1;
// gridsize 1
if (gridsize < 1)
return NO_ERROR;
// _addBackKer
hipLaunchKernelGGL(( _addBackKer), dim3(gridsize), dim3(blocksize), 0, 0, array, lastelemarray,
numelements, packnum, op, backward);
//
if (hipGetLastError() != hipSuccess)
return CUDA_ERROR;
//
return NO_ERROR;
}
// Macro: SCANFREE
// Frees previously allocated memory when an error occurs.
#define SCANFREE do { \
if (gridsize > 1) \
hipFree(blocksumDev); \
if (hostinarray) \
hipFree(inarrayDev); \
if (hostoutarray) \
hipFree(outarrayDev); \
if (!hostinarray) \
delete inarrayHost; \
if (!hostoutarray) \
delete outarrayHost; \
} while (0)
// Host member method: scanArray (array scan, float version)
template< class Operation >
__host__ int ScanArray::scanArray(float *inarray, float *outarray,
int numelements, Operation op, bool backward,
bool hostinarray, bool hostoutarray)
{
// NULL NULL
if (inarray == NULL || outarray == NULL)
return NULL_POINTER;
// 4
if (numelements < 0)
return INVALID_DATA;
//
hipError_t cuerrcode;
int errcode;
//
const unsigned int memsize = sizeof (float) * numelements;
unsigned int extraspace;
//
unsigned int sharedmemsize = 0;
// Host
//
float *inarrayDev = NULL;
float *outarrayDev = NULL;
// Device
//
float *inarrayHost = NULL;
float *outarrayHost = NULL;
// scan gridsize 1
// Kernel
//
int gridsize;
int blocksize;
//
float *blocksumDev = NULL;
//
int blocksumsize;
// scan
int packnum;
// CPU
if (scanType == CPU_IN_SCAN) {
// inarray Host Host
// Host
if (!hostinarray) {
// Host
inarrayHost = new float[memsize];
//
cuerrcode = hipMemcpy(inarrayHost, inarray, memsize,
hipMemcpyDeviceToHost);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
inarrayHost = inarray;
}
// outarray Host Host
// Host
if (!hostoutarray) {
// Host
outarrayHost = new float[memsize];
//
cuerrcode = hipMemcpy(outarrayHost, outarray, memsize,
hipMemcpyDeviceToHost);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
outarrayHost = outarray;
}
// inclusive scan
errcode = scanComputeGold<float>(inarrayHost, outarrayHost,
numelements, op, backward);
//
if (errcode != NO_ERROR) {
//
SCANFREE;
return errcode;
}
//
return NO_ERROR;
}
// inarray Host Device
// Device
if (hostinarray) {
//
cuerrcode = hipMalloc((void **)&inarrayDev, memsize);
if (cuerrcode != hipSuccess)
return cuerrcode;
//
cuerrcode = hipMemcpy(inarrayDev, inarray, memsize,
hipMemcpyHostToDevice);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
inarrayDev = inarray;
}
// outarray Host Device
// Device
if (hostoutarray) {
//
cuerrcode = hipMalloc((void **)&outarrayDev, memsize);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
outarrayDev = outarray;
}
//
switch(scanType) {
// scan
case NAIVE_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 1;
//
gridsize = max(1, (numelements + blocksize * packnum - 1) / blocksize);
sharedmemsize = sizeof (float) * blocksize * packnum;
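// The naive kernel double-buffers its data in shared memory, which is why
// the launch below requests 2 * sharedmemsize bytes.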
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanNaiveKer<float>), dim3(gridsize), dim3(blocksize), 2 * sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
// scan
case EFFICIENT_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 2;
//
sharedmemsize = sizeof (float) * (blocksize * packnum);
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanWorkEfficientKer<float>), dim3(gridsize), dim3(blocksize), sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
// scan
case OPTIMIZE_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 2;
//
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
sharedmemsize = sizeof (float) * (blocksize * packnum);
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanOptKer<float>), dim3(gridsize), dim3(blocksize), sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
// scan
case BETTER_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 2;
//
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
extraspace = blocksize * packnum / NUM_BANKS;
sharedmemsize = sizeof (float) * (blocksize * packnum + extraspace);
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanBetterKer<float>), dim3(gridsize), dim3(blocksize), sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
//
default:
if (hostinarray)
hipFree(inarrayDev);
if (hostoutarray)
hipFree(outarrayDev);
return INVALID_DATA;
}
// gird 1 scan
//
if (gridsize > 1) {
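// Multi-block case: scan the per-block totals recursively, then fold them
// back into the corresponding segments of the output array.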
//
//
errcode = scanArray(blocksumDev, blocksumDev, blocksumsize, op,
backward, false, false);
if (errcode != NO_ERROR) {
//
SCANFREE;
return errcode;
}
//
errcode = addBack(outarrayDev, blocksumDev, numelements,
blocksize, packnum, op, backward);
if (errcode != NO_ERROR) {
//
SCANFREE;
return errcode;
}
//
hipFree(blocksumDev);
}
// outarray Host
if (hostoutarray) {
//
cuerrcode = hipMemcpy(outarray, outarrayDev, memsize,
hipMemcpyDeviceToHost);
if (cuerrcode != hipSuccess) {
if (hostinarray)
hipFree(inarrayDev);
hipFree(outarrayDev);
return cuerrcode;
}
}
// Device host
if (hostinarray)
hipFree(inarrayDev);
if (hostoutarray)
hipFree(outarrayDev);
//
return NO_ERROR;
}
// Host member method: scanArray (array scan, int version)
template< class Operation >
__host__ int ScanArray::scanArray(int *inarray, int *outarray,
int numelements, Operation op, bool backward,
bool hostinarray, bool hostoutarray)
{
// NULL NULL
if (inarray == NULL || outarray == NULL)
return NULL_POINTER;
// 4
if (numelements < 0)
return INVALID_DATA;
//
hipError_t cuerrcode;
int errcode;
//
int memsize = sizeof (int) * numelements;
int extraspace;
//
int sharedmemsize = 0;
// Host
//
int *inarrayDev = NULL;
int *outarrayDev = NULL;
// Device
//
int *inarrayHost = NULL;
int *outarrayHost = NULL;
// scan gridsize 1
// Kernel
//
int gridsize;
int blocksize;
//
int *blocksumDev = NULL;
//
int blocksumsize;
// scan
int packnum;
// CPU
if (scanType == CPU_IN_SCAN) {
// inarray Host Host
// Host
if (!hostinarray) {
// Host
inarrayHost = new int[memsize];
//
cuerrcode = hipMemcpy(inarrayHost, inarray, memsize,
hipMemcpyDeviceToHost);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
inarrayHost = inarray;
}
// outarray Host Host
// Host
if (!hostoutarray) {
// Host
outarrayHost = new int[memsize];
//
cuerrcode = hipMemcpy(outarrayHost, outarray, memsize,
hipMemcpyDeviceToHost);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
outarrayHost = outarray;
}
// inclusive scan
errcode = scanComputeGold<int>(inarrayHost, outarrayHost,
numelements, op, backward);
//
if (errcode != NO_ERROR) {
//
SCANFREE;
return errcode;
}
//
return NO_ERROR;
}
// inarray Host Device
// Device
if (hostinarray) {
//
cuerrcode = hipMalloc((void **)&inarrayDev, memsize);
if (cuerrcode != hipSuccess)
return cuerrcode;
//
cuerrcode = hipMemcpy(inarrayDev, inarray, memsize,
hipMemcpyHostToDevice);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
inarrayDev = inarray;
}
// outarray Host Device
// Device
if (hostoutarray) {
//
cuerrcode = hipMalloc((void **)&outarrayDev, memsize);
if (cuerrcode != hipSuccess)
return cuerrcode;
} else {
//
outarrayDev = outarray;
}
//
switch(scanType) {
// scan
case NAIVE_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 1;
//
gridsize = max(1, (numelements + blocksize * packnum - 1) / blocksize);
sharedmemsize = sizeof (int) * blocksize * packnum;
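// The naive kernel double-buffers its data in shared memory, which is why
// the launch below requests 2 * sharedmemsize bytes.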
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanNaiveKer<int>), dim3(gridsize), dim3(blocksize), 2 * sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
// scan
case EFFICIENT_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 2;
//
sharedmemsize = sizeof (int) * (blocksize * packnum);
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanWorkEfficientKer<int>), dim3(gridsize), dim3(blocksize), sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
// scan
case OPTIMIZE_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 2;
//
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
sharedmemsize = sizeof (int) * (blocksize * packnum);
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanOptKer<int>), dim3(gridsize), dim3(blocksize), sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
// scan
case BETTER_SCAN:
// Kernel
//
blocksize = BLOCK_SIZE;
packnum = 2;
//
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
extraspace = blocksize * packnum / NUM_BANKS;
sharedmemsize = sizeof (int) * (blocksize * packnum + extraspace);
// grid 1
//
if (gridsize > 1) {
//
blocksumsize = gridsize - 1;
//
cuerrcode = hipMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != hipSuccess) {
hipFree(blocksumDev);
return cuerrcode;
}
}
// Kernel
//
hipLaunchKernelGGL(( _scanBetterKer<int>), dim3(gridsize), dim3(blocksize), sharedmemsize, 0,
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
//
if (hipGetLastError() != hipSuccess) {
//
SCANFREE;
return CUDA_ERROR;
}
break;
//
default:
if (hostinarray)
hipFree(inarrayDev);
if (hostoutarray)
hipFree(outarrayDev);
return INVALID_DATA;
}
// gird 1 scan
//
if (gridsize > 1) {
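// Multi-block case: scan the per-block totals recursively, then fold them
// back into the corresponding segments of the output array.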
//
//
errcode = scanArray(blocksumDev, blocksumDev, blocksumsize, op,
backward, false, false);
if (errcode != NO_ERROR) {
//
SCANFREE;
return errcode;
}
//
errcode = addBack(outarrayDev, blocksumDev, numelements,
blocksize, packnum, op, backward);
if (errcode != NO_ERROR) {
//
SCANFREE;
return errcode;
}
//
hipFree(blocksumDev);
}
// outarray Host
if (hostoutarray) {
//
cuerrcode = hipMemcpy(outarray, outarrayDev, memsize,
hipMemcpyDeviceToHost);
if (cuerrcode != hipSuccess) {
if (hostinarray)
hipFree(inarrayDev);
hipFree(outarrayDev);
return cuerrcode;
}
}
// Device host
if (hostinarray)
hipFree(inarrayDev);
if (hostoutarray)
hipFree(outarrayDev);
//
return NO_ERROR;
}
// Example: exercises ScanArray with different operators and element types.
void example()
{
// ScanArray
ScanArray s;
//
unsigned int num_elements = 1024;
//
const unsigned int mem_size = sizeof(float) * num_elements;
const unsigned int mem_size1 = sizeof(int) * num_elements;
// float
float *inarray = new float[mem_size];
float *outarray = new float[mem_size];
// int
int *inarray1 = new int[mem_size1];
int *outarray1 = new int[mem_size1];
// NAIVE_SCAN
s.setScanType(NAIVE_SCAN);
// host
bool inhost, outhost;
inhost = true;
outhost = true;
//
add_class<float> a;
multi_class<float> m;
max_class<float> max;
min_class<float> min;
add_class<int> a1;
multi_class<int> m1;
max_class<int> max1;
min_class<int> min1;
// float int
s.scanArray(inarray, outarray, num_elements, a, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, a1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, m, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, m1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, max, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, max1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, min, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, min1, false, inhost, outhost);
}
//
#undef SCANFREE
| b7ca86e7666589c17b16783cdeaed19b833262f2.cu | // ScanArray.cu
// 找出图像中给定点集的包围矩形
#include "ScanArray.h"
#include "OperationFunctor.h"
#include <iostream>
#include <stdio.h>
using namespace std;
#define BLOCK_SIZE 1024
// Macro: NUM_BANKS
// Number of shared-memory banks.
#define NUM_BANKS 16
// Macro: LOG_NUM_BANKS
// Base-2 logarithm of the number of banks.
#define LOG_NUM_BANKS 4
// Kernel function: _scanNaiveKer (naive version of array scan)
// Naive scan implementation where each thread handles one element. Runs in
// log(n) steps with n * (log(n) - 1) additions and needs shared memory of
// length 2 * n.
template < class T, class Operation >
__global__ void // Kernel functions have no return value
_scanNaiveKer(
T *outarray, // output array
T *inarray, // input array
T *blocksum, // intermediate array holding the last scanned value of
// each block; the last block is skipped, and it is not
// used when there is only a single block
int n, // number of elements (array length)
Operation op, // operation functor type
bool backward // whether to perform a backward scan
);
// Kernel function: _scanWorkEfficientKer (work-efficient version of array scan)
// Work-efficient scan implementation where each thread handles two elements.
// Complexity is O(log(n)) with O(n) additions. Uses shared memory of length n
// and the balanced-tree algorithm.
// Reference 1: Blelloch, 1990, "Prefix Sums and Their Applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// Reference 2: Prins and Chatterjee, PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void // Kernel functions have no return value
_scanWorkEfficientKer(
T *outarray, // output array
T *inarray, // input array
T *blocksum, // intermediate array holding the last scanned value of
// each block; the last block is skipped, and it is not
// used when there is only a single block
int n, // number of elements (array length)
Operation op, // operation functor type
bool backward // whether to perform a backward scan
);
// Kernel function: _scanOptKer (optimized version of array scan)
// Optimized scan implementation where each thread handles two elements.
// Complexity is O(log(n)) with O(n) additions. Shared-memory accesses are not
// padded against bank conflicts. Uses the balanced-tree algorithm.
// Reference 1: Blelloch, 1990, "Prefix Sums and Their Applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// Reference 2: Prins and Chatterjee, PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void // Kernel functions have no return value
_scanOptKer(
T *outarray, // output array
T *inarray, // input array
T *blocksum, // intermediate array holding the last scanned value of
// each block; the last block is skipped, and it is not
// used when there is only a single block
int n, // number of elements (array length)
Operation op, // operation functor type
bool backward // whether to perform a backward scan
);
// Kernel function: _scanBetterKer (improved version of array scan)
// Improved scan implementation where each thread handles two elements.
// Complexity is O(log(n)) with O(n) additions. To avoid bank conflicts the
// shared memory is padded to a length of n + n / NUM_BANKS. Uses the
// balanced-tree algorithm.
// Reference 1: Blelloch, 1990, "Prefix Sums and Their Applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// Reference 2: Prins and Chatterjee, PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void // Kernel functions have no return value
_scanBetterKer(
T *outarray, // output array
T *inarray, // input array
T *blocksum, // intermediate array holding the last scanned value of
// each block; the last block is skipped, and it is not
// used when there is only a single block
int n, // number of elements (array length)
Operation op, // operation functor type
bool backward // whether to perform a backward scan
);
// Function: scanComputeGold (CPU-side inclusive scan)
// Scans an array on the CPU, accumulating the given operation over all
// elements; each result includes the element itself (inclusive scan).
template < class T, class Operation >
__host__ int // Return value: NO_ERROR if the function
// executed correctly.
scanComputeGold(
T *inarray, // input array
T *reference, // output array
const unsigned int len, // array length, i.e. number of elements
Operation op, // operation functor type
bool backward // whether to perform a backward scan
);
// Kernel function: _addBackKer (add back the intermediate results)
// After the initial scan, adds the small intermediate array (the last element
// of each scanned segment) back onto the scanned array.
template < class T, class Operation >
__global__ void // Kernel functions have no return value
_addBackKer(
T *array, // array produced by the initial scan
T *lastelemarray, // intermediate array formed by extracting the last
// element of each segment of the scanned array
int n, // number of elements (array length)
int packnum, // per-block capacity of the scan kernel, i.e. the ratio
// of the elements processed per block to the block size
Operation op, // operation functor type
bool backward // whether to perform a backward scan
);
// Kernel function: _scanNaiveKer (naive version of array scan)
template < class T, class Operation >
__global__ void _scanNaiveKer(T *outarray, T *inarray, T *blocksum, int n,
Operation op, bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 数组索引(块内索引为 threadIdx.x)。
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// 定义共享内存中计算位置的两个变量。
// 共享内存大小是两倍的输入长度,所以通过 pout 和 pin 指针来控制计算位置。
T *pout = sharedmem;
T *pin = sharedmem + blockDim.x;
// 将需要计算的值从输入加载到共享内存上。大于数组长度的位置置为 0。
if (idx < n)
pout[threadIdx.x] = inarray[idx];
else
pout[threadIdx.x] = op.identity();
// 扫描过程,通过偏移量的控制,在两倍输入长度的共享内存上进行切换计算。
// 每次循环偏移量扩大 2 倍,这样能够实现不断的层次累加。最终实现 scan 的处
// 理效果。
if (!backward) {
for (int offset = 1; offset < blockDim.x; offset *= 2) {
// 共享内存大小是两倍的输入长度,所以通过 pout 和 pin 指针交换来换位
// 置进行处理,即计算值不覆盖原值。
T *ptemp;
ptemp = pout;
pout = pin;
pin = ptemp;
__syncthreads();
// 将所有当前的 scan 计算值,从共享内存的一侧复制到另一侧
// 一侧指共享内存采用两倍的输入数组的长度,即 double buffer。
pout[threadIdx.x] = pin[threadIdx.x];
// 如果线程索引大于偏移,那么要计算当前位置和当前位置减去偏移量处的
// 加和
if (threadIdx.x >= offset)
pout[threadIdx.x] = op(pout[threadIdx.x],
pin[threadIdx.x - offset]);
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 将结果从共享内存写入到输出。
outarray[idx] = pout[threadIdx.x];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一
// 个线程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = pout[threadIdx.x];
}
} else {
for (int offset = 1; offset < blockDim.x; offset *= 2) {
// 共享内存大小是两倍的输入长度,所以通过 pout 和 pin 指针交换来换位
// 置进行处理,即计算值不覆盖原值。
T *ptemp;
ptemp = pout;
pout = pin;
pin = ptemp;
__syncthreads();
// 将所有当前的 scan 计算值,从共享内存的一侧复制到另一侧
// 一侧指共享内存采用两倍的输入数组的长度,即 double buffer。
pout[threadIdx.x] = pin[threadIdx.x];
// 如果线程索引加上偏移量小于块长,那么要计算当前位置和当前位置加上
// 偏移量处的加和
if (threadIdx.x + offset < blockDim.x)
pout[threadIdx.x] = op(pin[threadIdx.x],
pin[threadIdx.x + offset]);
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 将结果从共享内存写入到输出。
outarray[idx] = pout[threadIdx.x];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线
// 程块不进行处理。
if (threadIdx.x == 0 && blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = pout[threadIdx.x];
}
}
}
// Kernel function: _scanWorkEfficientKer (work-efficient version of array scan)
template < class T, class Operation >
__global__ void _scanWorkEfficientKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op,
bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 定义块内索引。
int baseidx = 2 * blockIdx.x * blockDim.x;
int inidx = 2 * threadIdx.x;
int idx = baseidx + inidx;
// 定义偏移量 offset。
int offset = 1;
// 定义核函数每块的处理数组长度。
int length = blockDim.x * 2;
// 将需要计算的值从输入加载到共享内存上。每个线程处理两个元素(相邻元素)
sharedmem[inidx] = (idx < n) ? inarray[idx] : op.identity();
sharedmem[inidx + 1] = (idx + 1 < n) ? inarray[idx + 1] : op.identity();
// scan 的累加过程,自底向上进行累加操作。
if (!backward) {
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (inidx + 1) - 1;
int bi = offset * (inidx + 2) - 1;
// 累加。通过这样的过程使得最终最后一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[bi] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除最后一个位置上的值。
if (threadIdx.x == 0) {
// 根据运算类型不同,最后一个位置赋为不同的单位元。
sharedmem[length - 1] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset >>= 1;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (inidx + 1) - 1;
int bi = offset * (inidx + 2) - 1;
// 将 ai 位置处的值拷贝出来加到 bi 处,ai 的新值为 bi 原值。
T t = sharedmem[ai];
sharedmem[ai] = sharedmem[bi];
sharedmem[bi] = op(sharedmem[bi], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 将结果从共享内存写入到输出。
outarray[idx] = sharedmem[inidx + 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx + 1 >= n)
return;
// 判断是否为当前块处理数组的最后一个元素
if ((inidx + 1) == (2 * blockDim.x - 1))
// 是最后一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[idx + 1] = op(sharedmem[inidx + 1], inarray[idx + 1]);
else
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[idx + 1] = sharedmem[inidx + 2];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一
// 个线程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[idx + 1];
}
} else {
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * inidx;
int bi = offset * (inidx + 1);
// 累加。通过这样的过程使得最终第一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[ai] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除第一个位置上的值。
if (threadIdx.x == 0) {
// 根据运算类型不同,第一个位置赋为不同的单位元。
sharedmem[0] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset >>= 1;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * inidx;
int bi = offset * (inidx + 1);
// 将 bi 位置处的值拷贝出来加到 ai 处,bi 的新值为 ai 原值。
T t = sharedmem[bi];
sharedmem[bi] = sharedmem[ai];
sharedmem[ai] = op(sharedmem[ai], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 判断是否为当前块处理数组的第一个元素
if (inidx == 0)
// 是第一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[idx] = op(sharedmem[inidx], inarray[idx]);
else
// 将结果从共享内存写入到输出。
outarray[idx] = sharedmem[inidx - 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx + 1 < n) {
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[idx + 1] = sharedmem[inidx];
}
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线程
// 块不进行处理。
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[idx];
}
}
}
// Kernel function: _scanOptKer (optimized version of array scan)
template < class T, class Operation >
__global__ void _scanOptKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op, bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 定义块内索引。
int baseidx = 2 * blockIdx.x * blockDim.x;
// 定义核函数每块的处理数组长度。
int length = blockDim.x * 2;
// 定义计算位置的索引(块内)。
int ai = threadIdx.x;
int bi = threadIdx.x + blockDim.x;
// 定义数组索引(块外)。
int aindex = baseidx + ai;
int bindex = baseidx + bi;
// 将需要计算的值从输入加载到共享内存上。每个线程处理两个元素。
sharedmem[ai] = (aindex < n) ? inarray[aindex] : op.identity();
sharedmem[bi] = (bindex < n) ? inarray[bindex] : op.identity();
// 定义偏移值 offset。
int offset = 1;
if (!backward) {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (2 * threadIdx.x + 1) - 1;
int bi = offset * (2 * threadIdx.x + 2) - 1;
// 累加。通过这样的过程使得最终最后一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[bi] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除最后一个位置上的值。
if (threadIdx.x == 0) {
int index = length - 1;
// 根据运算符类型,数组最后一个位置设为不同的单位元
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (2 * threadIdx.x + 1) - 1;
int bi = offset * (2 * threadIdx.x + 2) - 1;
// 将 ai 位置处的值拷贝出来加到 bi 处,ai 的新值为 bi 原值。
T t = sharedmem[ai];
sharedmem[ai] = sharedmem[bi];
sharedmem[bi] = op(sharedmem[bi], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 将结果从共享内存写入到输出。
outarray[aindex] = sharedmem[ai + 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex >= n)
return;
// 判断是否为当前块处理数组的最后一个元素
if (bi == (2 * blockDim.x - 1))
// 是最后一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
else
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = sharedmem[bi + 1];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一个线
// 程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[bindex];
}
} else {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * 2 * threadIdx.x;
int bi = offset * (2 * threadIdx.x + 1);
// 累加。通过这样的过程使得最终第一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[ai] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除第一个位置上的值。
if (threadIdx.x == 0) {
int index = 0;
// 根据运算符类型,数组第一个位置设为不同的单位元
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * 2 * threadIdx.x;
int bi = offset * (2 * threadIdx.x + 1);
// 将 bi 位置处的值拷贝出来加到 ai 处,bi 的新值为 ai 原值。
T t = sharedmem[bi];
sharedmem[bi] = sharedmem[ai];
sharedmem[ai] = op(sharedmem[ai], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 判断是否为当前块处理数组的第一个元素
if (ai == 0)
// 是第一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
else
// 将结果从共享内存写入到输出。
outarray[aindex] = sharedmem[ai - 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex < n) {
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = sharedmem[bi - 1];
}
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线
// 程块不进行处理。
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[aindex];
}
}
}
// Macro: CONFLICT_FREE_OFFSET
// Defined to avoid bank conflicts more strictly, even at the lower levels of
// the tree.
#ifdef ZERO_BANK_CONFLICTS
# define CONFLICT_FREE_OFFSET(index) \
(((index) >> LOG_NUM_BANKS) + ((index) >> (2 * LOG_NUM_BANKS)))
#else
# define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
// Kernel function: _scanBetterKer (improved version of array scan)
template < class T, class Operation >
__global__ void _scanBetterKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op, bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 本地变量,基索引。
int baseidx = 2 * blockIdx.x * blockDim.x;
int idx = threadIdx.x + blockDim.x;
// 定义核函数每块的处理数组长度。
int length = blockDim.x * 2;
// 定义计算位置的索引(块内,加上 bankOffset)。
int ai = threadIdx.x + CONFLICT_FREE_OFFSET(threadIdx.x);
int bi = idx + CONFLICT_FREE_OFFSET(idx);
// 定义数组索引(块外)。
int aindex = baseidx + threadIdx.x;
int bindex = aindex + blockDim.x;
// 将需要计算的值从输入加载到共享内存上。每个线程处理两个元素。
sharedmem[ai] = (aindex < n) ? inarray[aindex] : op.identity();
sharedmem[bi] = (bindex < n) ? inarray[bindex] : op.identity();
// 定义偏移值 offset。
int offset = 1;
if (!backward) {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * (2 * threadIdx.x + 1) - 1;
int di = offset * (2 * threadIdx.x + 2) - 1;
// 避免 bank conflicts,修改计算位置索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 累加。通过这样的过程使得最终最后一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[di] = op(sharedmem[di], sharedmem[ci]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除最后一个位置上的值。
if (threadIdx.x == 0) {
int index = length - 1;
// 避免 bank conflicts,重新计算索引。
index += CONFLICT_FREE_OFFSET(index);
// 根据运算符类型,数组最后一个位置设为不同的单位元。
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * (2 * threadIdx.x + 1) - 1;
int di = offset * (2 * threadIdx.x + 2) - 1;
// 避免 bank conflicts,重新计算索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 将 ai 位置处的值拷贝出来加到 bi 处,ai 的新值为 bi 原值。
T t = sharedmem[ci];
sharedmem[ci] = sharedmem[di];
sharedmem[di] = op(sharedmem[di], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 将结果从共享内存写入到输出。
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex >= n)
return;
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一个线
// 程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[bindex];
}
} else {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * 2 * threadIdx.x;
int di = offset * (2 * threadIdx.x + 1);
// 避免 bank conflicts,修改计算位置索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 累加。通过这样的过程使得最终第一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[ci] = op(sharedmem[di], sharedmem[ci]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除第一个位置上的值。
if (threadIdx.x == 0) {
int index = 0;
// 避免 bank conflicts,重新计算索引。
index += CONFLICT_FREE_OFFSET(index);
// 根据运算符类型,数组第一个位置设为不同的单位元。
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * 2 * threadIdx.x;
int di = offset * (2 * threadIdx.x + 1);
// 避免 bank conflicts,重新计算索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 将 di 位置处的值拷贝出来加到 ci 处,di 的新值为 ci 原值。
T t = sharedmem[di];
sharedmem[di] = sharedmem[ci];
sharedmem[ci] = op(sharedmem[ci], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 处理的第一个元素,需要与对应输入数组位置上的元素进行运算
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex < n) {
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
}
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线
// 程块不进行处理。
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[baseidx];
}
}
}
// Function: scanComputeGold (CPU-side inclusive scan)
template < class T, class Operation >
__host__ int scanComputeGold(T *inarray, T *reference,
const unsigned int len, Operation op,
bool backward)
{
// 计数器变量
int i;
if (!backward) {
// 初始化第一个输出元素为 inarray[0]
reference[0] = inarray[0];
for (i = 1; i < len; ++i) {
// 前序迭代累加计算
reference[i] = op(inarray[i], reference[i-1]);
}
} else {
// 初始化最后一个输出元素为 inarray[len - 1]
reference[len - 1] = inarray[len - 1];
for (i = len - 2; i >= 0; i--) {
// 后序迭代累加计算
reference[i] = op(inarray[i], reference[i+1]);
}
}
// 处理完毕退出。
return NO_ERROR;
}
// Kernel function: _addBackKer (add back the intermediate results)
template < class T, class Operation >
__global__ void _addBackKer(T *array, T *lastelemarray, int n,
int packnum, Operation op, bool backward)
{
// 声明共享内存。用来存放中间结果小数组中的元素,也就是原数组的每段最后
// 一个或第一个元素。
__shared__ T lastelement[1];
if (!backward) {
// 用每块的第一个线程来读取每块前一块的最后一个元素,从中间结果数组中读
// 取。
if (threadIdx.x == 0)
lastelement[0] = lastelemarray[blockIdx.x];
// 计算需要进行块间累加位置索引(块外的数组索引)。
unsigned int idx = (blockIdx.x + 1) * (blockDim.x * packnum) +
threadIdx.x;
// 块内同步。
__syncthreads();
// 每个线程处理两个元素,将中间结果数组中的值加回到原数组。
for (int i = 0; i < packnum; i++) {
// 如果索引大于处理数组长度,则退出。
if (idx >= n)
break;
// 将中间结果加回。
array[idx] = op(array[idx],lastelement[0]);
// 计算每个线程处理的下一个元素的索引值。
idx += blockDim.x;
}
} else {
// 用每块的第一个线程来读取每块后一块的第一个元素,从中间结果数组中读
// 取。
if (threadIdx.x == 0) {
lastelement[0] = lastelemarray[blockIdx.x];
}
// 计算需要进行块间累加位置索引(块外的数组索引)。
unsigned int idx = blockIdx.x * (blockDim.x * packnum) + threadIdx.x;
// 块内同步。
__syncthreads();
// 每个线程处理两个元素,将中间结果数组中的值加回到原数组。
for (int i = 0; i < packnum; i++) {
// 如果索引大于处理数组长度,则退出。
if (idx >= n)
break;
// 将中间结果加回。
array[idx] = op(array[idx], lastelement[0]);
// 计算每个线程处理的下一个元素的索引值。
idx += blockDim.x;
}
}
}
// Host member method: addBack (add back intermediate results, float version)
template< class Operation >
__host__ int ScanArray::addBack(float *array, float *lastelemarray,
int numelements, int blocksize, int packnum,
Operation op, bool backward)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (array == NULL || lastelemarray == NULL)
return NULL_POINTER;
// 检查处理的数组长度,如果小于 0 出错。
if (numelements < 0)
return INVALID_DATA;
// 计算线程块大小。
int gridsize = (numelements + blocksize * packnum - 1) /
(blocksize * packnum) - 1;
// 判断 gridsize 大小,如果小于 1,则不用进行加回操作。返回正确。
if (gridsize < 1)
return NO_ERROR;
// 调用 _addBackKer 核函数,将中间结果数组加回到原扫描数组。
_addBackKer<<<gridsize, blocksize>>>(array, lastelemarray,
numelements, packnum, op, backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// 处理完毕退出。
return NO_ERROR;
}
// Host member method: addBack (add back intermediate results, int version)
template< class Operation >
__host__ int ScanArray::addBack(int *array, int *lastelemarray,
int numelements, int blocksize, int packnum,
Operation op, bool backward)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (array == NULL || lastelemarray == NULL)
return NULL_POINTER;
// 检查处理的数组长度,如果小于 0 出错。
if (numelements < 0)
return INVALID_DATA;
// 计算线程块大小。
int gridsize = (numelements + blocksize * packnum - 1) /
(blocksize * packnum) - 1;
// 判断 gridsize 大小,如果小于 1,则不用进行加回操作。返回正确。
if (gridsize < 1)
return NO_ERROR;
// 调用 _addBackKer 核函数,将中间结果数组加回到原扫描数组。
_addBackKer<<<gridsize, blocksize>>>(array, lastelemarray,
numelements, packnum, op, backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// 处理完毕退出。
return NO_ERROR;
}
// Macro: SCANFREE
// Frees previously allocated memory when an error occurs.
#define SCANFREE do { \
if (gridsize > 1) \
cudaFree(blocksumDev); \
if (hostinarray) \
cudaFree(inarrayDev); \
if (hostoutarray) \
cudaFree(outarrayDev); \
if (!hostinarray) \
delete inarrayHost; \
if (!hostoutarray) \
delete outarrayHost; \
} while (0)
// Host member method: scanArray (array scan, float version)
template< class Operation >
__host__ int ScanArray::scanArray(float *inarray, float *outarray,
int numelements, Operation op, bool backward,
bool hostinarray, bool hostoutarray)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (inarray == NULL || outarray == NULL)
return NULL_POINTER;
// 本程序实现的 4 种方法可处理的数组长度。必须加以判断控制。
if (numelements < 0)
return INVALID_DATA;
// 局部变量,错误码。
cudaError_t cuerrcode;
int errcode;
// 局部变量。
const unsigned int memsize = sizeof (float) * numelements;
unsigned int extraspace;
// 计算共享内存的长度。
unsigned int sharedmemsize = 0;
// 定义设备端的输入输出数组指针,当输入输出指针在 Host 端时,在设备端申请对
// 应大小的数组。
float *inarrayDev = NULL;
float *outarrayDev = NULL;
// 定义主机端的输入输出数组指针,当输入输出指针在 Device 端时,在主机端申请
// 对应大小的数组。
float *inarrayHost = NULL;
float *outarrayHost = NULL;
// 这里 scan 实现只支持单个线程块的计算,这里的 gridsize 可以设置的大于 1,
// 从而让多个线程块都运行相同程序来测速。计算调用 Kernel 函数的线程块的尺寸
// 和线程块的数量。
int gridsize;
int blocksize;
// 局部变量,中间结果存放数组。长度会根据线程块大小来确定。
float *blocksumDev = NULL;
// 中间结果数组的长度。
int blocksumsize;
// scan 算法中每个线程块的计算能力。核函数每块处理长度与线程块大小的比值。
int packnum;
// 针对 CPU 端的实现类型,选择路径进行处理。
if (scanType == CPU_IN_SCAN) {
// 判断当前 inarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostinarray) {
// 为输入数组在 Host 端申请内存。
inarrayHost = new float[memsize];
// 将输入数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(inarrayHost, inarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
inarrayHost = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostoutarray) {
// 为输出数组在 Host 端申请内存。
outarrayHost = new float[memsize];
// 将输出数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(outarrayHost, outarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
outarrayHost = outarray;
}
// 调用 inclusive 版的 scan 函数
errcode = scanComputeGold<float>(inarrayHost, outarrayHost,
numelements, op, backward);
// 出错则返回错误码。
if (errcode != NO_ERROR) {
// 释放内存
SCANFREE;
return errcode;
}
// 执行结束
return NO_ERROR;
}
// 判断当前 inarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostinarray) {
// 为输入数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&inarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
// 将输入数组拷贝到设备端内存。
cuerrcode = cudaMemcpy(inarrayDev, inarray, memsize,
cudaMemcpyHostToDevice);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
inarrayDev = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostoutarray) {
// 为输出数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&outarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
outarrayDev = outarray;
}
// 针对不同的实现类型,选择不同的路径进行处理。
switch(scanType) {
// 使用简单版本的 scan 实现。
case NAIVE_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 简单版本每个线程处理一个元素。
blocksize = BLOCK_SIZE;
packnum = 1;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + blocksize * packnum - 1) / blocksize);
sharedmemsize = sizeof (float) * blocksize * packnum;
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanNaiveKer<float><<<gridsize, blocksize, 2 * sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断核函数是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用效率版本的 scan 实现。
case EFFICIENT_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 效率版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
sharedmemsize = sizeof (float) * (blocksize * packnum);
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanWorkEfficientKer<float><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用优化版本的 scan 实现。
case OPTIMIZE_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 优化版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
sharedmemsize = sizeof (float) * (blocksize * packnum);
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanOptKer<float><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用优化版本的 scan 实现。
case BETTER_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 优化版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
extraspace = blocksize * packnum / NUM_BANKS;;
sharedmemsize = sizeof (float) * (blocksize * packnum + extraspace);
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanBetterKer<float><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 其他方式情况下,直接返回非法数据错误。
default:
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
return INVALID_DATA;
}
// 如果处理的 gird 尺寸大于 1,那么对于扫描中间结果数组进行一次 scan
// 操作和加回操作。
if (gridsize > 1) {
// 递归调用扫描函数。此时输入输出数组皆为中间结果数组。
// 这里的递归调用不会调用多次,数组的规模是指数倍减小的。
errcode = scanArray(blocksumDev, blocksumDev, blocksumsize, op,
backward, false, false);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
SCANFREE;
return errcode;
}
// 调用加回函数,将各块的扫描中间结果加回到输出数组。
errcode = addBack(outarrayDev, blocksumDev, numelements,
blocksize, packnum, op, backward);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
SCANFREE;
return errcode;
}
// 释放中间结果数组的设备端内存。
cudaFree(blocksumDev);
}
// 如果 outarray 在 Host 端,将结果拷贝到输出。
if (hostoutarray) {
// 将结果从设备端内存拷贝到输出数组。
cuerrcode = cudaMemcpy(outarray, outarrayDev, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess) {
if (hostinarray)
cudaFree(inarrayDev);
cudaFree(outarrayDev);
return cuerrcode;
}
}
// 释放 Device 内存。需要判断输入输出参数是否在 host 端。
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
// 处理完毕退出。
return NO_ERROR;
}
// Host member method: scanArray (array scan, int version)
template< class Operation >
__host__ int ScanArray::scanArray(int *inarray, int *outarray,
int numelements, Operation op, bool backward,
bool hostinarray, bool hostoutarray)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (inarray == NULL || outarray == NULL)
return NULL_POINTER;
// 本程序实现的 4 种方法可处理的数组长度。必须加以判断控制。
if (numelements < 0)
return INVALID_DATA;
// 局部变量,错误码。
cudaError_t cuerrcode;
int errcode;
// 局部变量。
int memsize = sizeof (int) * numelements;
int extraspace;
// 计算共享内存的长度。
int sharedmemsize = 0;
// 定义设备端的输入输出数组指针,当输入输出指针在 Host 端时,在设备端申请对
// 应大小的数组。
int *inarrayDev = NULL;
int *outarrayDev = NULL;
// 定义主机端的输入输出数组指针,当输入输出指针在 Device 端时,在主机端申请
// 对应大小的数组。
int *inarrayHost = NULL;
int *outarrayHost = NULL;
// 这里 scan 实现只支持单个线程块的计算,这里的 gridsize 可以设置的大于 1,
// 从而让多个线程块都运行相同程序来测速。计算调用 Kernel 函数的线程块的尺寸
// 和线程块的数量。
int gridsize;
int blocksize;
// 局部变量,中间结果存放数组。长度会根据线程块大小来确定。
int *blocksumDev = NULL;
// 中间结果数组的长度。
int blocksumsize;
// scan 算法中每个线程块的计算能力。核函数每块处理长度与线程块大小的比值。
int packnum;
// 针对 CPU 端实现类型,选择路径进行处理。
if (scanType == CPU_IN_SCAN) {
// 判断当前 inarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostinarray) {
// 为输入数组在 Host 端申请内存。
inarrayHost = new int[memsize];
// 将输入数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(inarrayHost, inarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
inarrayHost = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostoutarray) {
// 为输出数组在 Host 端申请内存。
outarrayHost = new int[memsize];
// 将输出数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(outarrayHost, outarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
outarrayHost = outarray;
}
// 调用 inclusive 版的 scan 函数
errcode = scanComputeGold<int>(inarrayHost, outarrayHost,
numelements, op, backward);
// 出错则返回错误码。
if (errcode != NO_ERROR) {
// 释放内存
SCANFREE;
return errcode;
}
// 执行结束
return NO_ERROR;
}
// 判断当前 inarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostinarray) {
// 为输入数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&inarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
// 将输入数组拷贝到设备端内存。
cuerrcode = cudaMemcpy(inarrayDev, inarray, memsize,
cudaMemcpyHostToDevice);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
inarrayDev = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostoutarray) {
// 为输出数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&outarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
outarrayDev = outarray;
}
// 针对不同的实现类型,选择不同的路径进行处理。
switch(scanType) {
// 使用简单版本的 scan 实现。
case NAIVE_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 简单版本每个线程处理一个元素。
blocksize = BLOCK_SIZE;
packnum = 1;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + blocksize * packnum - 1) / blocksize);
sharedmemsize = sizeof (int) * blocksize * packnum;
        // If the scan needs a grid larger than one block, an add-back pass is required,
        // so an array for the intermediate block sums must be allocated.
if (gridsize > 1) {
            // The last element of each segment is collected; the last thread block does not contribute one.
blocksumsize = gridsize - 1;
            // Allocate device memory for the intermediate block-sum array.
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
        // Launch the kernel that performs the actual array scan.
        // The pointers used here are the device-side pointers selected above.
_scanNaiveKer<int><<<gridsize, blocksize, 2 * sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
        // Check whether the kernel launch failed.
if (cudaGetLastError() != cudaSuccess) {
            // Free the previously allocated memory.
SCANFREE;
return CUDA_ERROR;
}
break;
    // Work-efficient scan implementation.
    case EFFICIENT_SCAN:
        // Compute the thread block size and the number of blocks for the kernel launch.
        // The work-efficient version processes two elements per thread.
blocksize = BLOCK_SIZE;
packnum = 2;
        // Compute the shared memory size and the grid size.
sharedmemsize = sizeof (int) * (blocksize * packnum);
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
        // If the scan needs a grid larger than one block, an add-back pass is required,
        // so an array for the intermediate block sums must be allocated.
if (gridsize > 1) {
            // The last element of each segment is collected; the last thread block does not contribute one.
blocksumsize = gridsize - 1;
            // Allocate device memory for the intermediate block-sum array.
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
        // Launch the kernel that performs the actual array scan.
        // The pointers used here are the device-side pointers selected above.
_scanWorkEfficientKer<int><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
        // Check for errors.
if (cudaGetLastError() != cudaSuccess) {
            // Free the previously allocated memory.
SCANFREE;
return CUDA_ERROR;
}
break;
    // Optimized scan implementation.
    case OPTIMIZE_SCAN:
        // Compute the thread block size and the number of blocks for the kernel launch.
        // The optimized version processes two elements per thread.
blocksize = BLOCK_SIZE;
packnum = 2;
        // Compute the grid size and the shared memory size.
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
sharedmemsize = sizeof (int) * (blocksize * packnum);
        // If the scan needs a grid larger than one block, an add-back pass is required,
        // so an array for the intermediate block sums must be allocated.
if (gridsize > 1) {
            // The last element of each segment is collected; the last thread block does not contribute one.
blocksumsize = gridsize - 1;
            // Allocate device memory for the intermediate block-sum array.
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
        // Launch the kernel that performs the actual array scan.
        // The pointers used here are the device-side pointers selected above.
_scanOptKer<int><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
        // Check for errors.
if (cudaGetLastError() != cudaSuccess) {
            // Free the previously allocated memory.
SCANFREE;
return CUDA_ERROR;
}
break;
    // Better scan implementation (shared memory padded to avoid bank conflicts).
    case BETTER_SCAN:
        // Compute the thread block size and the number of blocks for the kernel launch.
        // This version also processes two elements per thread.
blocksize = BLOCK_SIZE;
packnum = 2;
        // Compute the grid size and the shared memory size (including padding).
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
        extraspace = blocksize * packnum / NUM_BANKS;
sharedmemsize = sizeof (int) * (blocksize * packnum + extraspace);
        // If the scan needs a grid larger than one block, an add-back pass is required,
        // so an array for the intermediate block sums must be allocated.
if (gridsize > 1) {
            // The last element of each segment is collected; the last thread block does not contribute one.
blocksumsize = gridsize - 1;
            // Allocate device memory for the intermediate block-sum array.
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
        // Launch the kernel that performs the actual array scan.
        // The pointers used here are the device-side pointers selected above.
_scanBetterKer<int><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
        // Check for errors.
if (cudaGetLastError() != cudaSuccess) {
            // Free the previously allocated memory.
SCANFREE;
return CUDA_ERROR;
}
break;
    // For any other scan type, return an invalid-data error.
default:
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
return INVALID_DATA;
}
    // If the grid was larger than one block, run one more scan over the intermediate
    // block-sum array followed by the add-back pass.
if (gridsize > 1) {
        // Recursively call the scan; here input and output are both the block-sum array.
        // The recursion stays shallow because the array size shrinks exponentially.
errcode = scanArray(blocksumDev, blocksumDev, blocksumsize, op,
backward, false, false);
if (errcode != NO_ERROR) {
            // Free the previously allocated memory.
SCANFREE;
return errcode;
}
        // Call the add-back routine to add each block's scanned sum back into the output array.
errcode = addBack(outarrayDev, blocksumDev, numelements,
blocksize, packnum, op, backward);
if (errcode != NO_ERROR) {
            // Free the previously allocated memory.
SCANFREE;
return errcode;
}
        // Free the device memory of the intermediate block-sum array.
cudaFree(blocksumDev);
}
    // If outarray resides on the host, copy the result back to it.
    if (hostoutarray) {
        // Copy the result from device memory to the output array.
cuerrcode = cudaMemcpy(outarray, outarrayDev, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess) {
if (hostinarray)
cudaFree(inarrayDev);
cudaFree(outarrayDev);
return cuerrcode;
}
}
    // Free device memory; only the buffers allocated here (i.e. when the corresponding array lives on the host) are released.
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
    // Processing finished, return.
return NO_ERROR;
}
// Function: pre-instantiate the template member functions used above so that the templated code links correctly.
void example()
{
    // Create a ScanArray object.
ScanArray s;
    // Array length.
    unsigned int num_elements = 1024;
    // Sizes (in bytes) of the buffers to allocate.
    const unsigned int mem_size = sizeof(float) * num_elements;
    const unsigned int mem_size1 = sizeof(int) * num_elements;
    // Allocate the float input and output buffers.
    float *inarray = new float[num_elements];
    float *outarray = new float[num_elements];
    // Allocate the int input and output buffers.
    int *inarray1 = new int[num_elements];
    int *outarray1 = new int[num_elements];
    // Set the scan type to NAIVE_SCAN.
    s.setScanType(NAIVE_SCAN);
    // By default both the input and output arrays are on the host.
bool inhost, outhost;
inhost = true;
outhost = true;
    // Create the add, multiply, max and min functor objects.
add_class<float> a;
multi_class<float> m;
max_class<float> max;
min_class<float> min;
add_class<int> a1;
multi_class<int> m1;
max_class<int> max1;
min_class<int> min1;
    // Call scanArray with the float and int versions of each functor.
s.scanArray(inarray, outarray, num_elements, a, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, a1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, m, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, m1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, max, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, max1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, min, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, min1, false, inhost, outhost);
}
// Undefine the macro defined earlier.
#undef SCANFREE
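// Illustrative sketch (added example, not part of the ScanArray class): a plain
// host-side version of the same block decomposition that scanArray performs on the
// GPU -- scan each block independently, scan the per-block totals, then add each
// block's carried-in prefix back. The helper name and the blockSize parameter are
// placeholders for this sketch only; it assumes an inclusive "+" scan over ints.
static void hostBlockedInclusiveScanSketch(const int *in, int *out, int n, int blockSize)
{
    // 1. Inclusive scan inside every block, remembering each block's total.
    int numBlocks = (n + blockSize - 1) / blockSize;
    int *blockSums = new int[numBlocks];
    for (int b = 0; b < numBlocks; b++) {
        int running = 0;
        for (int i = b * blockSize; i < n && i < (b + 1) * blockSize; i++) {
            running += in[i];
            out[i] = running;
        }
        blockSums[b] = running;
    }
    // 2. Exclusive scan of the per-block totals (the role played by blocksumDev above).
    int carry = 0;
    for (int b = 0; b < numBlocks; b++) {
        int total = blockSums[b];
        blockSums[b] = carry;
        carry += total;
    }
    // 3. Add each block's prefix back to its elements (the role played by addBack above).
    for (int b = 0; b < numBlocks; b++) {
        for (int i = b * blockSize; i < n && i < (b + 1) * blockSize; i++)
            out[i] += blockSums[b];
    }
    delete [] blockSums;
}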
|
26c5520c434d530dd542dafb4f7f25a205766475.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Viola-Jones Object Detection Framework
//
// The algorithm and code are explained in the upcoming GPU Computing Gems
// chapter in detail:
//
// Anton Obukhov, "Haar Classifiers for Object Detection with CUDA"
// PDF URL placeholder
// email: [email protected], [email protected]
//
// Credits for help with the code to:
// Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov.
//
////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <cstdio>
#include "opencv2/core/cuda/warp.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_OBJDETECT
# include "opencv2/objdetect.hpp"
# include "opencv2/objdetect/objdetect_c.h"
#endif
#include "opencv2/cudalegacy/NCV.hpp"
#include "opencv2/cudalegacy/NPP_staging.hpp"
#include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
#include "NCVRuntimeTemplates.hpp"
#include "NCVAlg.hpp"
//==============================================================================
//
// BlockScan file
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
__device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
#if __CUDA_ARCH__ >= 300
const unsigned int laneId = cv::cuda::device::Warp::laneId();
    // scan using warp shuffle functions
#pragma unroll
for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
{
const Ncv32u n = cv::cuda::device::shfl_up(idata, i);
if (laneId >= i)
idata += n;
}
return idata;
#else
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
#endif
}
__device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
template <Ncv32u tiNumScanThreads>
__device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
Ncv32u warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
Ncv32u val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
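// Illustrative host-side sketch (added example, not used by the kernels above): the
// shared-memory path of warpScanInclusive is a Hillis-Steele inclusive scan over one
// warp, with a zero-filled half-warp placed in front of the data so the reads at
// pos-1, pos-2, ... never leave the buffer. The helper name below is a placeholder;
// it mirrors the manually unrolled +1/+2/+4/+8/+16 steps, where all lanes read
// before any lane writes.
static void hostWarpScanInclusiveSketch(const Ncv32u *laneIn, Ncv32u *laneOut)
{
    Ncv32u buf[2 * K_WARP_SIZE];
    for (Ncv32u lane = 0; lane < K_WARP_SIZE; lane++)
    {
        buf[lane] = 0;                              // padding that absorbs out-of-range reads
        buf[K_WARP_SIZE + lane] = laneIn[lane];
    }
    for (Ncv32u offset = 1; offset < K_WARP_SIZE; offset <<= 1)
    {
        Ncv32u snapshot[2 * K_WARP_SIZE];
        for (Ncv32u i = 0; i < 2 * K_WARP_SIZE; i++)
            snapshot[i] = buf[i];                   // lockstep: read phase of this step
        for (Ncv32u lane = 0; lane < K_WARP_SIZE; lane++)
            buf[K_WARP_SIZE + lane] =
                snapshot[K_WARP_SIZE + lane] + snapshot[K_WARP_SIZE + lane - offset];
    }
    for (Ncv32u lane = 0; lane < K_WARP_SIZE; lane++)
        laneOut[lane] = buf[K_WARP_SIZE + lane];
}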
//==============================================================================
//
// HaarClassifierCascade file
//
//==============================================================================
const Ncv32u MAX_GRID_DIM = 65535;
const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64;
#define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6
#define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2)
/** \internal
* Haar features solid array.
*/
texture<uint2, 1, hipReadModeElementType> texHaarFeatures;
/** \internal
* Haar classifiers flattened trees container.
* Two parts: first contains root nodes, second - nodes that are referred by root nodes.
 * Drawback: breaks tree locality (might cause more cache misses)
* Advantage: No need to introduce additional 32-bit field to index root nodes offsets
*/
texture<uint4, 1, hipReadModeElementType> texHaarClassifierNodes;
texture<Ncv32u, 1, hipReadModeElementType> texIImage;
__device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages)
{
return d_Stages[iStage];
}
template <NcvBool tbCacheTextureCascade>
__device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes)
{
HaarClassifierNode128 tmpNode;
if (tbCacheTextureCascade)
{
tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode);
}
else
{
tmpNode = d_ClassifierNodes[iNode];
}
return tmpNode;
}
template <NcvBool tbCacheTextureCascade>
__device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features,
Ncv32f *weight,
Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight)
{
HaarFeature64 feature;
if (tbCacheTextureCascade)
{
feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature);
}
else
{
feature = d_Features[iFeature];
}
feature.getRect(rectX, rectY, rectWidth, rectHeight);
*weight = feature.getWeight();
}
template <NcvBool tbCacheTextureIImg>
__device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg)
{
if (tbCacheTextureIImg)
{
return tex1Dfetch(texIImage, x);
}
else
{
return d_IImg[x];
}
}
__device__ Ncv32u d_outMaskPosition;
__device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
__shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2];
__shared__ Ncv32u numPassed;
__shared__ Ncv32u outMaskOffset;
Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem);
__syncthreads();
if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1)
{
numPassed = incScan;
outMaskOffset = atomicAdd(&d_outMaskPosition, incScan);
}
if (threadPassFlag)
{
Ncv32u excScan = incScan - threadPassFlag;
shmem[excScan] = threadElem;
}
__syncthreads();
if (threadIdx.x < numPassed)
{
vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x];
}
#endif
}
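// Illustrative host-side sketch (added example, not used above): the compaction in
// compactBlockWriteOutAnchorParallel works by (1) taking an inclusive scan of the 0/1
// pass flags, (2) reserving numPassed slots in the global output with one atomicAdd,
// and (3) writing every passing element at its exclusive-scan offset. The helper name
// below is a placeholder; it shows the same idea sequentially.
static Ncv32u hostCompactSketch(const Ncv32u *elems, const Ncv32u *passFlags, Ncv32u n, Ncv32u *out)
{
    Ncv32u incScan = 0;                            // running inclusive scan of the flags
    for (Ncv32u i = 0; i < n; i++)
    {
        incScan += passFlags[i];
        if (passFlags[i])
        {
            Ncv32u excScan = incScan - passFlags[i];   // exclusive offset of element i
            out[excScan] = elems[i];
        }
    }
    return incScan;                                // number of elements written (numPassed)
}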
template <NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u y_offs;
Ncv32u x_offs;
Ncv32u maskOffset;
Ncv32u outMaskVal;
NcvBool bInactiveThread = false;
if (tbReadPixelIndexFromVector)
{
maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (maskOffset >= mask1Dlen)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
outMaskVal = d_inMask[maskOffset];
y_offs = outMaskVal >> 16;
x_offs = outMaskVal & 0xFFFF;
}
}
else
{
y_offs = blockIdx.y;
x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (x_offs >= mask2Dstride)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
maskOffset = y_offs * mask2Dstride + x_offs;
if ((x_offs >= anchorsRoi.width) ||
(!tbInitMaskPositively &&
d_inMask != d_outMask &&
d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U))
{
if (tbDoAtomicCompaction)
{
bInactiveThread = true;
}
else
{
d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U;
return;
}
}
outMaskVal = (y_offs << 16) | x_offs;
}
}
NcvBool bPass = true;
if (!tbDoAtomicCompaction || tbDoAtomicCompaction)
{
Ncv32f pixelStdDev = 0.0f;
if (!bInactiveThread)
pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++)
{
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset();
Ncv32f stageThreshold = curStage.getStageThreshold();
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u iNode = curRootNodeOffset;
if (bPass && !bInactiveThread)
{
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset++;
}
if (curStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
__syncthreads();
if (!tbDoAtomicCompaction)
{
if (!tbReadPixelIndexFromVector ||
(tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask)))
{
d_outMask[maskOffset] = outMaskVal;
}
}
else
{
compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread,
outMaskVal,
d_outMask);
}
}
template <NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x;
if (maskOffset >= mask1Dlen)
{
return;
}
Ncv32u outMaskVal = d_inMask[maskOffset];
Ncv32u y_offs = outMaskVal >> 16;
Ncv32u x_offs = outMaskVal & 0xFFFF;
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
NcvBool bPass = true;
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
//this variable is subject to reduction
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x;
Ncv32f stageThreshold = curStage.getStageThreshold();
Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2;
for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++)
{
NcvBool bMoreNodesToTraverse = true;
if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage)
{
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
//TODO: fetch into shmem if size suffices. Shmem can be shared with reduce
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL;
}
Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum);
if (finalStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
if (!tbDoAtomicCompaction)
{
if (!bPass || d_inMask != d_outMask)
{
if (!threadIdx.x)
{
d_outMask[maskOffset] = outMaskVal;
}
}
}
else
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
if (bPass && !threadIdx.x)
{
Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1);
d_outMask[outMaskOffset] = outMaskVal;
}
#endif
}
}
template <NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction>
__global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
Ncv32u y_offs = blockIdx.y;
Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs;
Ncv32u y_offs_upsc = step * y_offs;
Ncv32u x_offs_upsc = step * x_offs;
Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc;
Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U;
if (x_offs_upsc < anchorsRoi.width &&
(!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U))
{
outElem = (y_offs_upsc << 16) | x_offs_upsc;
}
if (!tbDoAtomicCompaction)
{
d_outMask[outMaskOffset] = outElem;
}
else
{
compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U,
outElem,
d_outMask);
}
}
struct applyHaarClassifierAnchorParallelFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
hipLaunchKernelGGL(( applyHaarClassifierAnchorParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value,
Loki::TL::TypeAt<TList, 3>::Result::value,
Loki::TL::TypeAt<TList, 4>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor>
::call( &functor,
tbInitMaskPositively,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbReadPixelIndexFromVector,
tbDoAtomicCompaction);
}
struct applyHaarClassifierClassifierParallelFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
hipLaunchKernelGGL(( applyHaarClassifierClassifierParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor>
::call( &functor,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbDoAtomicCompaction);
}
struct initializeMaskVectorFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u step;
//Arguments are passed through the constructor
initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _step) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
step(_step)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
hipLaunchKernelGGL(( initializeMaskVector <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
}
};
void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor>
::call( &functor,
tbMaskByInmask,
tbDoAtomicCompaction);
}
Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages)
{
Ncv32u i = 0;
for (; i<haar.NumStages; i++)
{
if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N)
{
break;
}
}
return i;
}
NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral,
NCVMatrix<Ncv32f> &d_weights,
NCVMatrixAlloc<Ncv32u> &d_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea,
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
hipDeviceProp_t &devProp,
hipStream_t cuStream)
{
ncvAssertReturn(integral.memType() == d_weights.memType()&&
integral.memType() == d_pixelMask.memType() &&
integral.memType() == gpuAllocator.memType() &&
(integral.memType() == NCVMemoryTypeDevice ||
integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height &&
d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height &&
integral.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
#if defined _SELF_TEST_
NCVStatus ncvStat;
NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch);
ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch);
ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length);
ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length);
ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
ncvStat = d_pixelMask.copySolid(h_pixelMask, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = integral.copySolid(h_integralImage, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_weights.copySolid(h_weights, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(0), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<d_pixelMask.stride(); j++)
{
if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width))
{
if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U)
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j;
}
}
else
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
NCV_SKIP_COND_END
#endif
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride());
ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length()));
ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2);
ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
Ncv32u *hp_zero = &hp_pool32u.ptr()[0];
Ncv32u *hp_numDet = &hp_pool32u.ptr()[1];
NCV_SKIP_COND_BEGIN
*hp_zero = 0;
*hp_numDet = 0;
NCV_SKIP_COND_END
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
NcvBool bTexCacheCascade = devProp.major < 2;
NcvBool bTexCacheIImg = true; //this works better even on Fermi so far
NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3);
NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask;
NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp;
Ncv32u szNppCompactTmpBuf;
nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp);
if (bDoAtomicCompaction)
{
szNppCompactTmpBuf = 0;
}
NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf);
NCV_SKIP_COND_BEGIN
if (bTexCacheIImg)
{
hipChannelFormatDesc cfdTexIImage;
cfdTexIImage = hipCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage,
(anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
if (bTexCacheCascade)
{
hipChannelFormatDesc cfdTexHaarFeatures;
hipChannelFormatDesc cfdTexHaarClassifierNodes;
cfdTexHaarFeatures = hipCreateChannelDesc<uint2>();
cfdTexHaarClassifierNodes = hipCreateChannelDesc<uint4>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarFeatures,
d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarClassifierNodes,
d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
Ncv32u stageStartAnchorParallel = 0;
Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL,
haar, h_HaarStages);
Ncv32u stageEndClassifierParallel = haar.NumStages;
if (stageMiddleSwitch == 0)
{
stageMiddleSwitch = 1;
}
//create stages subdivision for pixel-parallel processing
const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1;
Ncv32u curStop = stageStartAnchorParallel;
std::vector<Ncv32u> pixParallelStageStops;
while (curStop < stageMiddleSwitch)
{
pixParallelStageStops.push_back(curStop);
curStop += compactEveryNstage;
}
if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2)
{
pixParallelStageStops[pixParallelStageStops.size()-1] =
(stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2;
}
pixParallelStageStops.push_back(stageMiddleSwitch);
Ncv32u pixParallelStageStopsIndex = 0;
if (pixelStep != 1 || bMaskElements)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
(anchorsRoi.height + pixelStep - 1) / pixelStep);
dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL);
if (gridInit.x == 0 || gridInit.y == 0)
{
numDetections = 0;
return NCV_SUCCESS;
}
initializeMaskVectorDynTemplate(bMaskElements,
bDoAtomicCompaction,
gridInit, blockInit, cuStream,
d_ptrNowData->ptr(),
d_ptrNowTmp->ptr(),
static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(),
anchorsRoi, pixelStep);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
swap(d_ptrNowData, d_ptrNowTmp);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR);
}
numDetections = *hp_numDet;
}
else
{
//
        // 1. Run the first pixel-input, pixel-parallel classifier for a few stages
//
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
anchorsRoi.height);
dim3 block1(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
true, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid1,
block1,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
0,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
pixParallelStageStopsIndex++;
}
//
// 2. Run pixel-parallel stages
//
for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++)
{
if (numDetections == 0)
{
break;
}
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL);
if (numDetections > MAX_GRID_DIM)
{
grid2.x = MAX_GRID_DIM;
grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block2(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
false, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid2,
block2,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
//
    // 3. Run all remaining stages in one stage-parallel kernel
//
if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid3(numDetections);
if (numDetections > MAX_GRID_DIM)
{
grid3.x = MAX_GRID_DIM;
grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL);
applyHaarClassifierClassifierParallelDynTemplate(
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
bDoAtomicCompaction, //tbDoAtomicCompaction
grid3,
block3,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
stageMiddleSwitch,
stageEndClassifierParallel,
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
if (d_ptrNowData != &d_vecPixelMask)
{
d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
#if defined _SELF_TEST_
ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections);
}
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
Ncv32u numDetGold;
ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar,
h_HaarStages, h_HaarNodes, h_HaarFeatures,
bMaskElements, anchorsRoi, pixelStep, scaleArea);
ncvAssertReturnNcvStat(ncvStat);
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
bool bPass = true;
if (numDetGold != numDetections)
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections);
bPass = false;
}
else
{
for (Ncv32u i=0; i<::max(numDetGold, numDetections) && bPass; i++)
{
if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i])
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]);
bPass = false;
}
}
}
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED");
#endif
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
//==============================================================================
//
// HypothesesOperations file
//
//==============================================================================
const Ncv32u NUM_GROW_THREADS = 128;
__device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale)
{
NcvRect32u res;
res.x = (Ncv32u)(scale * (pixel & 0xFFFF));
res.y = (Ncv32u)(scale * (pixel >> 16));
res.width = (Ncv32u)(scale * width);
res.height = (Ncv32u)(scale * height);
return res;
}
__global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements,
NcvRect32u *hypotheses,
Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x;
if (elemAddr >= numElements)
{
return;
}
hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale);
}
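// Illustrative sketch (added example, not part of the pipeline): the pixel masks
// produced by the classifier kernels pack an anchor as (y << 16) | x, and
// pixelToRect turns that packed value back into a rectangle scaled to the original
// image. The helper name below is a placeholder. For a 20x20 classifier window at
// scale 2, an anchor at x=12, y=3 becomes the rectangle (24, 6, 40, 40).
static NcvRect32u exampleDecodePackedAnchorSketch()
{
    Ncv32u packed = (3u << 16) | 12u;              // y = 3 in the high half, x = 12 in the low half
    return pixelToRect(packed, 20u, 20u, 2.0f);    // -> {x=24, y=6, width=40, height=40}
}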
NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale,
hipStream_t cuStream)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
                    totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
dim3 block(NUM_GROW_THREADS);
dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( growDetectionsKernel), dim3(grid), dim3(block), 0, cuStream, pixelMask.ptr(), numDetsToCopy,
hypotheses.ptr() + totalDetections,
rectWidth, rectHeight, curScale);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
totalDetections += numDetsToCopy;
return ncvStat;
}
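// Illustrative sketch (added example, not used above): several launches in this file
// fold a 1-D block count into a 2-D grid because one grid dimension is capped (65535
// here, MAX_GRID_DIM for the detection kernels). The helper name below is a
// placeholder; kernels such as growDetectionsKernel then rebuild the linear block id
// as blockIdx.y * 65535 + blockIdx.x.
static dim3 foldLinearGridSketch(Ncv32u numBlocks)
{
    dim3 grid(numBlocks);
    if (grid.x > 65535)
    {
        grid.y = (grid.x + 65534) / 65535;
        grid.x = 65535;
    }
    return grid;
}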
//==============================================================================
//
// Pipeline file
//
//==============================================================================
NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
NcvSize32u srcRoi,
NCVVector<NcvRect32u> &d_dstRects,
Ncv32u &dstNumRects,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvSize32u minObjSize,
Ncv32u minNeighbors, //default 4
Ncv32f scaleStep, //default 1.2f
Ncv32u pixelStep, //default 1
Ncv32u flags, //default NCVPipeObjDet_Default
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
hipDeviceProp_t &devProp,
hipStream_t cuStream)
{
ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() &&
d_srcImg.memType() == gpuAllocator.memType() &&
(d_srcImg.memType() == NCVMemoryTypeDevice ||
d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 &&
d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height &&
srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height &&
d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
//TODO: set NPP active stream to cuStream
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
Ncv32u integralWidth = d_srcImg.width() + 1;
Ncv32u integralHeight = d_srcImg.height() + 1;
NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVStatus nppStat;
Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, ::max(szTmpBufIntegral, szTmpBufSqIntegral));
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
integral.ptr(), integral.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
dstNumRects = 0;
Ncv32u lastCheckedScale = 0;
NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0);
std::vector<Ncv32u> scalesVector;
NcvBool bFoundLargestFace = false;
for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep)
{
Ncv32u scale = (Ncv32u)scaleIter;
if (lastCheckedScale == scale)
{
continue;
}
lastCheckedScale = scale;
if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width ||
haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height)
{
continue;
}
NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRo_i.width = srcRoi_.width + 1;
srcIIRo_i.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRo_i.width / scale;
scaledIIRoi.height = srcIIRo_i.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
if (searchRoi.width <= 0 || searchRoi.height <= 0)
{
break;
}
scalesVector.push_back(scale);
if (gpuAllocator.isCounting())
{
break;
}
}
if (bReverseTraverseScale)
{
std::reverse(scalesVector.begin(), scalesVector.end());
}
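    // The loop above enumerates the detection scales: scaleIter grows geometrically by
    // scaleStep, is truncated to an integer scale factor, repeated integer scales are
    // skipped, and scales whose scaled classifier window is below minObjSize are ignored.
    // Enumeration stops once the scaled search ROI no longer fits the classifier window.
    // When NCVPipeObjDet_FindLargestObject is set, the scales are traversed from the
    // largest downwards so the search can stop at the first grouped detection.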
//TODO: handle _fair_scale_ flag
for (Ncv32u i=0; i<scalesVector.size(); i++)
{
Ncv32u scale = scalesVector[i];
NcvSize32u srcRoi_, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRoi.width = srcRoi_.width + 1;
srcIIRoi.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
NCV_SKIP_COND_BEGIN
nppStat = nppiStDecimate_32u_C1R(
integral.ptr(), integral.pitch(),
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStDecimate_64u_C1R(
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
const NcvRect32u rect(
HAAR_STDDEV_BORDER,
HAAR_STDDEV_BORDER,
haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
nppStat = nppiStRectStdDev_32f_C1R(
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
d_rectStdDev.ptr(), d_rectStdDev.pitch(),
NcvSize32u(searchRoi.width, searchRoi.height), rect,
(Ncv32f)scale*scale, true);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
Ncv32u detectionsOnThisScale;
ncvStat = ncvApplyHaarClassifierCascade_device(
d_scaledIntegralImage, d_rectStdDev, d_pixelMask,
detectionsOnThisScale,
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
searchRoi, pixelStep, (Ncv32f)scale*scale,
gpuAllocator, cpuAllocator, devProp, cuStream);
        ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_BEGIN
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment());
ncvStat = ncvGrowDetectionsVector_device(
d_vecPixelMask,
detectionsOnThisScale,
d_hypothesesIntermediate,
dstNumRects,
static_cast<Ncv32u>(d_hypothesesIntermediate.length()),
haar.ClassifierSize.width,
haar.ClassifierSize.height,
(Ncv32f)scale,
cuStream);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (dstNumRects == 0)
{
continue;
}
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
Ncv32u numStrongHypothesesNow = dstNumRects;
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
numStrongHypothesesNow,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (numStrongHypothesesNow > 0)
{
NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0];
for (Ncv32u j=1; j<numStrongHypothesesNow; j++)
{
if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width)
{
maxRect = h_hypothesesIntermediate.ptr()[j];
}
}
h_hypothesesIntermediate.ptr()[0] = maxRect;
dstNumRects = 1;
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
bFoundLargestFace = true;
break;
}
}
NCV_SKIP_COND_END
if (gpuAllocator.isCounting())
{
break;
}
}
NCVStatus ncvRetCode = NCV_SUCCESS;
NCV_SKIP_COND_BEGIN
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (!bFoundLargestFace)
{
dstNumRects = 0;
}
}
else
{
//TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left)
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
dstNumRects,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (dstNumRects > d_dstRects.length())
{
ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
dstNumRects = static_cast<Ncv32u>(d_dstRects.length());
}
if (dstNumRects != 0)
{
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
}
}
if (flags & NCVPipeObjDet_VisualizeInPlace)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(),
d_srcImg.width(), d_srcImg.height(),
d_dstRects.ptr(), dstNumRects, 255, cuStream);
}
NCV_SKIP_COND_END
return ncvRetCode;
}
//==============================================================================
//
// Purely Host code: classifier IO, mock-ups
//
//==============================================================================
#ifdef _SELF_TEST_
#include <float.h>
#endif
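// The *_host functions below form a CPU reference path for the device pipeline:
// the cascade is evaluated per anchor and surviving anchors are written to the pixel
// mask in the same ((y << 16) | x) packed format that the device kernels produce.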
NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage,
NCVMatrix<Ncv32f> &h_weights,
NCVMatrixAlloc<Ncv32u> &h_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea)
{
ncvAssertReturn(h_integralImage.memType() == h_weights.memType() &&
h_integralImage.memType() == h_pixelMask.memType() &&
(h_integralImage.memType() == NCVMemoryTypeHostPageable ||
h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() &&
h_HaarStages.memType() == h_HaarFeatures.memType() &&
(h_HaarStages.memType() == NCVMemoryTypeHostPageable ||
h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height &&
h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height &&
h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages &&
h_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
h_HaarFeatures.length() >= haar.NumFeatures &&
h_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
for (Ncv32u i=0; i<anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
{
if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width)
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
else
{
for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++)
{
Ncv32f curStageSum = 0.0f;
Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset();
if (iStage == 0)
{
if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
else
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j);
}
}
else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u curNodeOffset = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset];
HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures();
Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect];
Ncv32u rectX, rectY, rectWidth, rectHeight;
feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight);
Ncv32f rectWeight = feature.getWeight();
Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride();
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL];
Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR];
Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL];
Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR];
Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR;
curNodeVal += (Ncv32f)rectSum * rectWeight;
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
curNodeOffset = nextNodeDescriptor.getNextNodeOffset();
}
}
curRootNodeOffset++;
}
Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold();
if (curStageSum < tmpStageThreshold)
{
//drop
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
}
}
std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride());
Ncv32u i = 0;
for (; i<anchorsRoi.height * h_pixelMask.stride(); i++)
{
if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
}
numDetections = i;
return NCV_SUCCESS;
}
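// Note on the mask encoding: a surviving anchor is stored as ((y << 16) | x), a rejected
// one as OBJDET_MASK_ELEMENT_INVALID_32U (all bits set). Sorting the mask therefore moves
// all detections to the front, and the index of the first invalid element is the
// detection count returned in numDetections.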
NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
                    totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
for (Ncv32u i=0; i<numDetsToCopy; i++)
{
hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale);
}
totalDetections += numDetsToCopy;
return ncvStat;
}
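// ncvGrowDetectionsVector_host appends the packed mask positions to the hypotheses vector:
// each ((y << 16) | x) element is expanded by pixelToRect() into an NcvRect32u of
// rectWidth x rectHeight scaled by curScale. Running past totalMaxDetections is reported
// as NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW rather than as a hard error.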
static NCVStatus loadFromXML(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
#ifndef HAVE_OPENCV_OBJDETECT
(void) filename;
(void) haar;
(void) haarStages;
(void) haarClassifierNodes;
(void) haarFeatures;
CV_Error(cv::Error::StsNotImplemented, "This functionality requires objdetect module");
return NCV_HAAR_XML_LOADING_EXCEPTION;
#else
NCVStatus ncvStat;
haar.NumStages = 0;
haar.NumClassifierRootNodes = 0;
haar.NumClassifierTotalNodes = 0;
haar.NumFeatures = 0;
haar.ClassifierSize.width = 0;
haar.ClassifierSize.height = 0;
haar.bHasStumpsOnly = true;
haar.bNeedsTiltedII = false;
Ncv32u curMaxTreeDepth = 0;
std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
haarStages.resize(0);
haarClassifierNodes.resize(0);
haarFeatures.resize(0);
cv::Ptr<CvHaarClassifierCascade> oldCascade((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
if (!oldCascade)
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
haar.ClassifierSize.width = oldCascade->orig_window_size.width;
haar.ClassifierSize.height = oldCascade->orig_window_size.height;
    int stagesCount = oldCascade->count;
    for(int s = 0; s < stagesCount; ++s) // by stages
{
HaarStage64 curStage;
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);
int treesCount = oldCascade->stage_classifier[s].count;
for(int t = 0; t < treesCount; ++t) // by trees
{
Ncv32u nodeId = 0;
CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];
int nodesCount = tree->count;
for(int n = 0; n < nodesCount; ++n) //by features
{
CvHaarFeature* feature = &tree->haar_feature[n];
HaarClassifierNode128 curNode;
curNode.setThreshold(tree->threshold[n]);
NcvBool bIsLeftNodeLeaf = false;
NcvBool bIsRightNodeLeaf = false;
HaarClassifierNodeDescriptor32 nodeLeft;
if ( tree->left[n] <= 0 )
{
Ncv32f leftVal = tree->alpha[-tree->left[n]];
ncvStat = nodeLeft.create(leftVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsLeftNodeLeaf = true;
}
else
{
Ncv32u leftNodeOffset = tree->left[n];
nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight;
if ( tree->right[n] <= 0 )
{
Ncv32f rightVal = tree->alpha[-tree->right[n]];
ncvStat = nodeRight.create(rightVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsRightNodeLeaf = true;
}
else
{
Ncv32u rightNodeOffset = tree->right[n];
nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setRightNodeDesc(nodeRight);
Ncv32u tiltedVal = feature->tilted;
                    haar.bNeedsTiltedII = haar.bNeedsTiltedII || (tiltedVal != 0); //any tilted feature means the tilted integral image is required
Ncv32u featureId = 0;
for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
{
Ncv32u rectX = feature->rect[l].r.x;
Ncv32u rectY = feature->rect[l].r.y;
Ncv32u rectWidth = feature->rect[l].r.width;
Ncv32u rectHeight = feature->rect[l].r.height;
Ncv32f rectWeight = feature->rect[l].weight;
if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
break;
HaarFeature64 curFeature;
ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
curFeature.setWeight(rectWeight);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
haarFeatures.push_back(curFeature);
featureId++;
}
HaarFeatureDescriptor32 tmpFeatureDesc;
ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
curNode.setFeatureDesc(tmpFeatureDesc);
if (!nodeId)
{
//root node
haarClassifierNodes.push_back(curNode);
curMaxTreeDepth = 1;
}
else
{
//other node
h_TmpClassifierNotRootNodes.push_back(curNode);
curMaxTreeDepth++;
}
nodeId++;
}
}
curStage.setNumClassifierRootNodes(treesCount);
haarStages.push_back(curStage);
}
//fill in cascade stats
haar.NumStages = static_cast<Ncv32u>(haarStages.size());
haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());
//merge root and leaf nodes in one classifiers array
Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
haarClassifierNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
haarClassifierNodes[i].setRightNodeDesc(nodeRight);
}
for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight);
haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]);
}
return NCV_SUCCESS;
#endif
}
#define NVBIN_HAAR_SIZERESERVED 16
#define NVBIN_HAAR_VERSION 0x1
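// NVBIN on-disk layout (as written by ncvHaarStoreNVBIN_host and read by loadFromNVBIN):
//   bytes [0..3]    Ncv32u  file version (NVBIN_HAAR_VERSION)
//   bytes [4..7]    Ncv32u  total file size in bytes
//   bytes [8..15]   reserved (header padded to NVBIN_HAAR_SIZERESERVED)
//   from offset 16: NumStages, NumClassifierRootNodes, NumClassifierTotalNodes,
//                   NumFeatures (4 x Ncv32u), ClassifierSize (NcvSize32u),
//                   bNeedsTiltedII, bHasStumpsOnly (NcvBool each),
//   followed by the HaarStage64, HaarClassifierNode128 and HaarFeature64 arrays.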
static NCVStatus loadFromNVBIN(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
size_t readCount;
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
Ncv32u fsize;
readCount = fread(&fsize, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fseek(fp, 0, SEEK_END);
Ncv32u fsizeActual = ftell(fp);
ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR);
std::vector<unsigned char> fdata;
fdata.resize(fsize);
Ncv32u dataOffset = 0;
fseek(fp, 0, SEEK_SET);
readCount = fread(&fdata[0], fsize, 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvSize32u);
haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haarStages.resize(haar.NumStages);
haarClassifierNodes.resize(haar.NumClassifierTotalNodes);
haarFeatures.resize(haar.NumFeatures);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages);
dataOffset += szStages;
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers);
dataOffset += szClassifiers;
memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures);
dataOffset += szFeatures;
return NCV_SUCCESS;
}
NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages,
Ncv32u &numNodes, Ncv32u &numFeatures)
{
size_t readCount;
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
if (fext == "nvbin")
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET);
Ncv32u tmp;
readCount = fread(&numStages, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&tmp, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
}
else if (fext == "xml")
{
HaarClassifierCascadeDescriptor haar;
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
numStages = haar.NumStages;
numNodes = haar.NumClassifierTotalNodes;
numFeatures = haar.NumFeatures;
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
return NCV_SUCCESS;
}
NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
if (fext == "nvbin")
{
ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else if (fext == "xml")
{
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64));
memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128));
memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64));
return NCV_SUCCESS;
}
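// Illustrative usage sketch (comment only, not part of the library API surface): a typical
// load path queries the cascade dimensions first, allocates pinned host vectors of the
// reported sizes, and then calls ncvHaarLoadFromFile_host. Allocator construction is
// omitted and "pinnedAllocator" is a placeholder name.
//
//   Ncv32u numStages, numNodes, numFeatures;
//   NCVStatus stat = ncvHaarGetClassifierSize(filename, numStages, numNodes, numFeatures);
//   ncvAssertReturnNcvStat(stat);
//   NCVVectorAlloc<HaarStage64>           h_stages(pinnedAllocator, numStages);
//   NCVVectorAlloc<HaarClassifierNode128> h_nodes(pinnedAllocator, numNodes);
//   NCVVectorAlloc<HaarFeature64>         h_features(pinnedAllocator, numFeatures);
//   HaarClassifierCascadeDescriptor haar;
//   stat = ncvHaarLoadFromFile_host(filename, haar, h_stages, h_nodes, h_features);
//   ncvAssertReturnNcvStat(stat);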
NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename,
HaarClassifierCascadeDescriptor haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
Ncv32u dataOffset = 0;
std::vector<unsigned char> fdata;
fdata.resize(szStages+szClassifiers+szFeatures+1024, 0);
//header
*(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION;
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures;
dataOffset += sizeof(Ncv32u);
*(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize;
dataOffset += sizeof(NcvSize32u);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII;
dataOffset += sizeof(NcvBool);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly;
dataOffset += sizeof(NcvBool);
memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages);
dataOffset += szStages;
memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers);
dataOffset += szClassifiers;
memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures);
dataOffset += szFeatures;
Ncv32u fsize = dataOffset;
//TODO: CRC32 here
//update header
dataOffset = sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = fsize;
FILE *fp = fopen(filename.c_str(), "wb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
fwrite(&fdata[0], fsize, 1, fp);
fclose(fp);
return NCV_SUCCESS;
}
| 26c5520c434d530dd542dafb4f7f25a205766475.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Viola-Jones Object Detection Framework
//
// The algorithm and code are explained in the upcoming GPU Computing Gems
// chapter in detail:
//
// Anton Obukhov, "Haar Classifiers for Object Detection with CUDA"
// PDF URL placeholder
// email: [email protected], [email protected]
//
// Credits for help with the code to:
// Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov.
//
////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <cstdio>
#include "opencv2/core/cuda/warp.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_OBJDETECT
# include "opencv2/objdetect.hpp"
# include "opencv2/objdetect/objdetect_c.h"
#endif
#include "opencv2/cudalegacy/NCV.hpp"
#include "opencv2/cudalegacy/NPP_staging.hpp"
#include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
#include "NCVRuntimeTemplates.hpp"
#include "NCVAlg.hpp"
//==============================================================================
//
// BlockScan file
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
__device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
#if __CUDA_ARCH__ >= 300
const unsigned int laneId = cv::cuda::device::Warp::laneId();
    // scan using shuffle functions
#pragma unroll
for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
{
const Ncv32u n = cv::cuda::device::shfl_up(idata, i);
if (laneId >= i)
idata += n;
}
return idata;
#else
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
#endif
}
__device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
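// Example: if every lane contributes idata == 1, warpScanInclusive returns laneId + 1
// (1, 2, ..., 32) and warpScanExclusive returns laneId (0, 1, ..., 31). In the
// shared-memory fallback the lower half of each 2*K_WARP_SIZE window is zero-filled so
// the fixed-offset additions never read data from outside the warp's own range.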
template <Ncv32u tiNumScanThreads>
__device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
Ncv32u warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
Ncv32u val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
//==============================================================================
//
// HaarClassifierCascade file
//
//==============================================================================
const Ncv32u MAX_GRID_DIM = 65535;
const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64;
#define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6
#define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2)
/** \internal
* Haar features solid array.
*/
texture<uint2, 1, cudaReadModeElementType> texHaarFeatures;
/** \internal
* Haar classifiers flattened trees container.
* Two parts: the first contains root nodes, the second contains nodes referred to by the root nodes.
* Drawback: breaks tree locality (might cause more cache misses).
* Advantage: no need to introduce an additional 32-bit field to index root node offsets.
*/
texture<uint4, 1, cudaReadModeElementType> texHaarClassifierNodes;
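// Layout note: root nodes of all trees come first in the flat array, followed by the
// remaining (non-root) nodes; next-node offsets are rebased by the root-node count when
// the cascade is loaded, so a single flat index addresses both parts.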
texture<Ncv32u, 1, cudaReadModeElementType> texIImage;
__device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages)
{
return d_Stages[iStage];
}
template <NcvBool tbCacheTextureCascade>
__device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes)
{
HaarClassifierNode128 tmpNode;
if (tbCacheTextureCascade)
{
tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode);
}
else
{
tmpNode = d_ClassifierNodes[iNode];
}
return tmpNode;
}
template <NcvBool tbCacheTextureCascade>
__device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features,
Ncv32f *weight,
Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight)
{
HaarFeature64 feature;
if (tbCacheTextureCascade)
{
feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature);
}
else
{
feature = d_Features[iFeature];
}
feature.getRect(rectX, rectY, rectWidth, rectHeight);
*weight = feature.getWeight();
}
template <NcvBool tbCacheTextureIImg>
__device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg)
{
if (tbCacheTextureIImg)
{
return tex1Dfetch(texIImage, x);
}
else
{
return d_IImg[x];
}
}
__device__ Ncv32u d_outMaskPosition;
__device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
__shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2];
__shared__ Ncv32u numPassed;
__shared__ Ncv32u outMaskOffset;
Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem);
__syncthreads();
if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1)
{
numPassed = incScan;
outMaskOffset = atomicAdd(&d_outMaskPosition, incScan);
}
if (threadPassFlag)
{
Ncv32u excScan = incScan - threadPassFlag;
shmem[excScan] = threadElem;
}
__syncthreads();
if (threadIdx.x < numPassed)
{
vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x];
}
#endif
}
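// Per-block stream compaction: an inclusive block scan ranks the passing threads, the last
// thread of the block reserves a contiguous output range via atomicAdd on d_outMaskPosition,
// and passing elements are staged in shared memory so the final global write is coalesced.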
template <NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u y_offs;
Ncv32u x_offs;
Ncv32u maskOffset;
Ncv32u outMaskVal;
NcvBool bInactiveThread = false;
if (tbReadPixelIndexFromVector)
{
maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (maskOffset >= mask1Dlen)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
outMaskVal = d_inMask[maskOffset];
y_offs = outMaskVal >> 16;
x_offs = outMaskVal & 0xFFFF;
}
}
else
{
y_offs = blockIdx.y;
x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (x_offs >= mask2Dstride)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
maskOffset = y_offs * mask2Dstride + x_offs;
if ((x_offs >= anchorsRoi.width) ||
(!tbInitMaskPositively &&
d_inMask != d_outMask &&
d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U))
{
if (tbDoAtomicCompaction)
{
bInactiveThread = true;
}
else
{
d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U;
return;
}
}
outMaskVal = (y_offs << 16) | x_offs;
}
}
NcvBool bPass = true;
if (!tbDoAtomicCompaction || tbDoAtomicCompaction)
{
Ncv32f pixelStdDev = 0.0f;
if (!bInactiveThread)
pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++)
{
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset();
Ncv32f stageThreshold = curStage.getStageThreshold();
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u iNode = curRootNodeOffset;
if (bPass && !bInactiveThread)
{
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset++;
}
if (curStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
__syncthreads();
if (!tbDoAtomicCompaction)
{
if (!tbReadPixelIndexFromVector ||
(tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask)))
{
d_outMask[maskOffset] = outMaskVal;
}
}
else
{
compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread,
outMaskVal,
d_outMask);
}
}
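// The kernel above and the one below split the cascade work differently:
//  - applyHaarClassifierAnchorParallel: one thread per anchor (search window position),
//    each thread walking the assigned cascade stages on its own;
//  - applyHaarClassifierClassifierParallel: one block per surviving anchor, with
//    NUM_THREADS_CLASSIFIERPARALLEL threads sharing the classifiers of each stage and a
//    block-wide reduction (subReduce) producing the stage sum.
// ncvApplyHaarClassifierCascade_device chooses the stage at which to switch between them.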
template <NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x;
if (maskOffset >= mask1Dlen)
{
return;
}
Ncv32u outMaskVal = d_inMask[maskOffset];
Ncv32u y_offs = outMaskVal >> 16;
Ncv32u x_offs = outMaskVal & 0xFFFF;
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
NcvBool bPass = true;
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
//this variable is subject to reduction
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x;
Ncv32f stageThreshold = curStage.getStageThreshold();
Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2;
for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++)
{
NcvBool bMoreNodesToTraverse = true;
if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage)
{
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
//TODO: fetch into shmem if size suffices. Shmem can be shared with reduce
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL;
}
Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum);
if (finalStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
if (!tbDoAtomicCompaction)
{
if (!bPass || d_inMask != d_outMask)
{
if (!threadIdx.x)
{
d_outMask[maskOffset] = outMaskVal;
}
}
}
else
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
if (bPass && !threadIdx.x)
{
Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1);
d_outMask[outMaskOffset] = outMaskVal;
}
#endif
}
}
template <NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction>
__global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
Ncv32u y_offs = blockIdx.y;
Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs;
Ncv32u y_offs_upsc = step * y_offs;
Ncv32u x_offs_upsc = step * x_offs;
Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc;
Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U;
if (x_offs_upsc < anchorsRoi.width &&
(!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U))
{
outElem = (y_offs_upsc << 16) | x_offs_upsc;
}
if (!tbDoAtomicCompaction)
{
d_outMask[outMaskOffset] = outElem;
}
else
{
compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U,
outElem,
d_outMask);
}
}
struct applyHaarClassifierAnchorParallelFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
applyHaarClassifierAnchorParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value,
Loki::TL::TypeAt<TList, 3>::Result::value,
Loki::TL::TypeAt<TList, 4>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor>
::call( &functor,
tbInitMaskPositively,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbReadPixelIndexFromVector,
tbDoAtomicCompaction);
}
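// Dispatch note: the kernels take several NcvBool template parameters whose values are only
// known at run time. Each *Functor captures the launch configuration and kernel arguments,
// and NCVRuntimeTemplateBool::KernelCaller expands the run-time booleans into the matching
// compile-time instantiation before invoking the functor's call().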
struct applyHaarClassifierClassifierParallelFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
applyHaarClassifierClassifierParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor>
::call( &functor,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbDoAtomicCompaction);
}
struct initializeMaskVectorFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u step;
//Arguments are passed through the constructor
initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _step) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
step(_step)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
initializeMaskVector <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
}
};
void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor>
::call( &functor,
tbMaskByInmask,
tbDoAtomicCompaction);
}
Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages)
{
Ncv32u i = 0;
for (; i<haar.NumStages; i++)
{
if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N)
{
break;
}
}
return i;
}
NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral,
NCVMatrix<Ncv32f> &d_weights,
NCVMatrixAlloc<Ncv32u> &d_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea,
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
cudaDeviceProp &devProp,
cudaStream_t cuStream)
{
ncvAssertReturn(integral.memType() == d_weights.memType()&&
integral.memType() == d_pixelMask.memType() &&
integral.memType() == gpuAllocator.memType() &&
(integral.memType() == NCVMemoryTypeDevice ||
integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height &&
d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height &&
integral.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
#if defined _SELF_TEST_
NCVStatus ncvStat;
NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch);
ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch);
ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length);
ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length);
ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
ncvStat = d_pixelMask.copySolid(h_pixelMask, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = integral.copySolid(h_integralImage, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_weights.copySolid(h_weights, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<d_pixelMask.stride(); j++)
{
if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width))
{
if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U)
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j;
}
}
else
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
NCV_SKIP_COND_END
#endif
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride());
ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length()));
ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2);
ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
Ncv32u *hp_zero = &hp_pool32u.ptr()[0];
Ncv32u *hp_numDet = &hp_pool32u.ptr()[1];
NCV_SKIP_COND_BEGIN
*hp_zero = 0;
*hp_numDet = 0;
NCV_SKIP_COND_END
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
NcvBool bTexCacheCascade = devProp.major < 2;
NcvBool bTexCacheIImg = true; //this works better even on Fermi so far
NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3);
NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask;
NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp;
Ncv32u szNppCompactTmpBuf;
nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp);
if (bDoAtomicCompaction)
{
szNppCompactTmpBuf = 0;
}
NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf);
NCV_SKIP_COND_BEGIN
if (bTexCacheIImg)
{
cudaChannelFormatDesc cfdTexIImage;
cfdTexIImage = cudaCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage,
(anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
if (bTexCacheCascade)
{
cudaChannelFormatDesc cfdTexHaarFeatures;
cudaChannelFormatDesc cfdTexHaarClassifierNodes;
cfdTexHaarFeatures = cudaCreateChannelDesc<uint2>();
cfdTexHaarClassifierNodes = cudaCreateChannelDesc<uint4>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarFeatures,
d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarClassifierNodes,
d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
Ncv32u stageStartAnchorParallel = 0;
Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL,
haar, h_HaarStages);
Ncv32u stageEndClassifierParallel = haar.NumStages;
if (stageMiddleSwitch == 0)
{
stageMiddleSwitch = 1;
}
//create stages subdivision for pixel-parallel processing
const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1;
Ncv32u curStop = stageStartAnchorParallel;
std::vector<Ncv32u> pixParallelStageStops;
while (curStop < stageMiddleSwitch)
{
pixParallelStageStops.push_back(curStop);
curStop += compactEveryNstage;
}
if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2)
{
pixParallelStageStops[pixParallelStageStops.size()-1] =
(stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2;
}
pixParallelStageStops.push_back(stageMiddleSwitch);
Ncv32u pixParallelStageStopsIndex = 0;
if (pixelStep != 1 || bMaskElements)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
(anchorsRoi.height + pixelStep - 1) / pixelStep);
dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL);
if (gridInit.x == 0 || gridInit.y == 0)
{
numDetections = 0;
return NCV_SUCCESS;
}
initializeMaskVectorDynTemplate(bMaskElements,
bDoAtomicCompaction,
gridInit, blockInit, cuStream,
d_ptrNowData->ptr(),
d_ptrNowTmp->ptr(),
static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(),
anchorsRoi, pixelStep);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
swap(d_ptrNowData, d_ptrNowTmp);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR);
}
numDetections = *hp_numDet;
}
else
{
//
// 1. Run the first pixel-input pixel-parallel classifier for few stages
//
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
anchorsRoi.height);
dim3 block1(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
true, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid1,
block1,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
0,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
pixParallelStageStopsIndex++;
}
//
// 2. Run pixel-parallel stages
//
for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++)
{
if (numDetections == 0)
{
break;
}
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL);
if (numDetections > MAX_GRID_DIM)
{
grid2.x = MAX_GRID_DIM;
grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block2(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
false, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid2,
block2,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
//
// 3. Run all left stages in one stage-parallel kernel
//
if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid3(numDetections);
if (numDetections > MAX_GRID_DIM)
{
grid3.x = MAX_GRID_DIM;
grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL);
applyHaarClassifierClassifierParallelDynTemplate(
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
bDoAtomicCompaction, //tbDoAtomicCompaction
grid3,
block3,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
stageMiddleSwitch,
stageEndClassifierParallel,
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
if (d_ptrNowData != &d_vecPixelMask)
{
d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
#if defined _SELF_TEST_
ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections);
}
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
Ncv32u numDetGold;
ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar,
h_HaarStages, h_HaarNodes, h_HaarFeatures,
bMaskElements, anchorsRoi, pixelStep, scaleArea);
ncvAssertReturnNcvStat(ncvStat);
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
bool bPass = true;
if (numDetGold != numDetections)
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections);
bPass = false;
}
else
{
for (Ncv32u i=0; i<std::max(numDetGold, numDetections) && bPass; i++)
{
if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i])
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]);
bPass = false;
}
}
}
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED");
#endif
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
//==============================================================================
//
// HypothesesOperations file
//
//==============================================================================
const Ncv32u NUM_GROW_THREADS = 128;
__device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale)
{
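// The packed pixel value stores the detection anchor as (y << 16) | x; unpack it and scale back to image coordinates.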
NcvRect32u res;
res.x = (Ncv32u)(scale * (pixel & 0xFFFF));
res.y = (Ncv32u)(scale * (pixel >> 16));
res.width = (Ncv32u)(scale * width);
res.height = (Ncv32u)(scale * height);
return res;
}
__global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements,
NcvRect32u *hypotheses,
Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale)
{
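// The launch grid may be folded into two dimensions when more than 65535 blocks are needed, so reconstruct a linear block id first.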
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x;
if (elemAddr >= numElements)
{
return;
}
hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale);
}
NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale,
cudaStream_t cuStream)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
dim3 block(NUM_GROW_THREADS);
dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
growDetectionsKernel<<<grid, block, 0, cuStream>>>(pixelMask.ptr(), numDetsToCopy,
hypotheses.ptr() + totalDetections,
rectWidth, rectHeight, curScale);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
totalDetections += numDetsToCopy;
return ncvStat;
}
//==============================================================================
//
// Pipeline file
//
//==============================================================================
NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
NcvSize32u srcRoi,
NCVVector<NcvRect32u> &d_dstRects,
Ncv32u &dstNumRects,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvSize32u minObjSize,
Ncv32u minNeighbors, //default 4
Ncv32f scaleStep, //default 1.2f
Ncv32u pixelStep, //default 1
Ncv32u flags, //default NCVPipeObjDet_Default
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
cudaDeviceProp &devProp,
cudaStream_t cuStream)
{
ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() &&
d_srcImg.memType() == gpuAllocator.memType() &&
(d_srcImg.memType() == NCVMemoryTypeDevice ||
d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 &&
d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height &&
srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height &&
d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
//TODO: set NPP active stream to cuStream
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
Ncv32u integralWidth = d_srcImg.width() + 1;
Ncv32u integralHeight = d_srcImg.height() + 1;
NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVStatus nppStat;
Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, std::max(szTmpBufIntegral, szTmpBufSqIntegral));
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
integral.ptr(), integral.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
dstNumRects = 0;
Ncv32u lastCheckedScale = 0;
NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0);
std::vector<Ncv32u> scalesVector;
NcvBool bFoundLargestFace = false;
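// Build the list of integer decimation scales to process: walk the geometric scale progression, keep each distinct
// integer scale whose detection window is no smaller than the minimum object size, and stop once the search window
// no longer fits inside the image.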
for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep)
{
Ncv32u scale = (Ncv32u)scaleIter;
if (lastCheckedScale == scale)
{
continue;
}
lastCheckedScale = scale;
if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width ||
haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height)
{
continue;
}
NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRo_i.width = srcRoi_.width + 1;
srcIIRo_i.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRo_i.width / scale;
scaledIIRoi.height = srcIIRo_i.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
if (searchRoi.width <= 0 || searchRoi.height <= 0)
{
break;
}
scalesVector.push_back(scale);
if (gpuAllocator.isCounting())
{
break;
}
}
if (bReverseTraverseScale)
{
std::reverse(scalesVector.begin(), scalesVector.end());
}
//TODO: handle _fair_scale_ flag
for (Ncv32u i=0; i<scalesVector.size(); i++)
{
Ncv32u scale = scalesVector[i];
NcvSize32u srcRoi_, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRoi.width = srcRoi_.width + 1;
srcIIRoi.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
NCV_SKIP_COND_BEGIN
nppStat = nppiStDecimate_32u_C1R(
integral.ptr(), integral.pitch(),
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStDecimate_64u_C1R(
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
const NcvRect32u rect(
HAAR_STDDEV_BORDER,
HAAR_STDDEV_BORDER,
haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
nppStat = nppiStRectStdDev_32f_C1R(
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
d_rectStdDev.ptr(), d_rectStdDev.pitch(),
NcvSize32u(searchRoi.width, searchRoi.height), rect,
(Ncv32f)scale*scale, true);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
Ncv32u detectionsOnThisScale;
ncvStat = ncvApplyHaarClassifierCascade_device(
d_scaledIntegralImage, d_rectStdDev, d_pixelMask,
detectionsOnThisScale,
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
searchRoi, pixelStep, (Ncv32f)scale*scale,
gpuAllocator, cpuAllocator, devProp, cuStream);
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_BEGIN
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment());
ncvStat = ncvGrowDetectionsVector_device(
d_vecPixelMask,
detectionsOnThisScale,
d_hypothesesIntermediate,
dstNumRects,
static_cast<Ncv32u>(d_hypothesesIntermediate.length()),
haar.ClassifierSize.width,
haar.ClassifierSize.height,
(Ncv32f)scale,
cuStream);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (dstNumRects == 0)
{
continue;
}
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
Ncv32u numStrongHypothesesNow = dstNumRects;
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
numStrongHypothesesNow,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (numStrongHypothesesNow > 0)
{
NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0];
for (Ncv32u j=1; j<numStrongHypothesesNow; j++)
{
if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width)
{
maxRect = h_hypothesesIntermediate.ptr()[j];
}
}
h_hypothesesIntermediate.ptr()[0] = maxRect;
dstNumRects = 1;
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
bFoundLargestFace = true;
break;
}
}
NCV_SKIP_COND_END
if (gpuAllocator.isCounting())
{
break;
}
}
NCVStatus ncvRetCode = NCV_SUCCESS;
NCV_SKIP_COND_BEGIN
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (!bFoundLargestFace)
{
dstNumRects = 0;
}
}
else
{
//TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left)
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
dstNumRects,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (dstNumRects > d_dstRects.length())
{
ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
dstNumRects = static_cast<Ncv32u>(d_dstRects.length());
}
if (dstNumRects != 0)
{
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
}
}
if (flags & NCVPipeObjDet_VisualizeInPlace)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(),
d_srcImg.width(), d_srcImg.height(),
d_dstRects.ptr(), dstNumRects, 255, cuStream);
}
NCV_SKIP_COND_END
return ncvRetCode;
}
//==============================================================================
//
// Purely Host code: classifier IO, mock-ups
//
//==============================================================================
#ifdef _SELF_TEST_
#include <float.h>
#endif
NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage,
NCVMatrix<Ncv32f> &h_weights,
NCVMatrixAlloc<Ncv32u> &h_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea)
{
ncvAssertReturn(h_integralImage.memType() == h_weights.memType() &&
h_integralImage.memType() == h_pixelMask.memType() &&
(h_integralImage.memType() == NCVMemoryTypeHostPageable ||
h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() &&
h_HaarStages.memType() == h_HaarFeatures.memType() &&
(h_HaarStages.memType() == NCVMemoryTypeHostPageable ||
h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height &&
h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height &&
h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages &&
h_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
h_HaarFeatures.length() >= haar.NumFeatures &&
h_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
for (Ncv32u i=0; i<anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
{
if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width)
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
else
{
for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++)
{
Ncv32f curStageSum = 0.0f;
Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset();
if (iStage == 0)
{
if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
else
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j);
}
}
else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
while (numRootNodesInStage--)
{
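// Traverse this classifier tree from its root: accumulate the weighted rectangle sums of the current node's feature,
// compare against the normalized threshold, and follow the left/right child until a leaf adds its value to the stage sum.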
NcvBool bMoreNodesToTraverse = true;
Ncv32u curNodeOffset = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset];
HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures();
Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect];
Ncv32u rectX, rectY, rectWidth, rectHeight;
feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight);
Ncv32f rectWeight = feature.getWeight();
Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride();
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL];
Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR];
Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL];
Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR];
Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR;
curNodeVal += (Ncv32f)rectSum * rectWeight;
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
curNodeOffset = nextNodeDescriptor.getNextNodeOffset();
}
}
curRootNodeOffset++;
}
Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold();
if (curStageSum < tmpStageThreshold)
{
//drop
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
}
}
std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride());
Ncv32u i = 0;
for (; i<anchorsRoi.height * h_pixelMask.stride(); i++)
{
if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
}
numDetections = i;
return NCV_SUCCESS;
}
NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
for (Ncv32u i=0; i<numDetsToCopy; i++)
{
hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale);
}
totalDetections += numDetsToCopy;
return ncvStat;
}
static NCVStatus loadFromXML(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
#ifndef HAVE_OPENCV_OBJDETECT
(void) filename;
(void) haar;
(void) haarStages;
(void) haarClassifierNodes;
(void) haarFeatures;
CV_Error(cv::Error::StsNotImplemented, "This functionality requires objdetect module");
return NCV_HAAR_XML_LOADING_EXCEPTION;
#else
NCVStatus ncvStat;
haar.NumStages = 0;
haar.NumClassifierRootNodes = 0;
haar.NumClassifierTotalNodes = 0;
haar.NumFeatures = 0;
haar.ClassifierSize.width = 0;
haar.ClassifierSize.height = 0;
haar.bHasStumpsOnly = true;
haar.bNeedsTiltedII = false;
Ncv32u curMaxTreeDepth = 0;
std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
haarStages.resize(0);
haarClassifierNodes.resize(0);
haarFeatures.resize(0);
cv::Ptr<CvHaarClassifierCascade> oldCascade((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
if (!oldCascade)
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
haar.ClassifierSize.width = oldCascade->orig_window_size.width;
haar.ClassifierSize.height = oldCascade->orig_window_size.height;
int stagesCount = oldCascade->count;
for(int s = 0; s < stagesCount; ++s) // by stages
{
HaarStage64 curStage;
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);
int treesCount = oldCascade->stage_classifier[s].count;
for(int t = 0; t < treesCount; ++t) // by trees
{
Ncv32u nodeId = 0;
CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];
int nodesCount = tree->count;
for(int n = 0; n < nodesCount; ++n) //by features
{
CvHaarFeature* feature = &tree->haar_feature[n];
HaarClassifierNode128 curNode;
curNode.setThreshold(tree->threshold[n]);
NcvBool bIsLeftNodeLeaf = false;
NcvBool bIsRightNodeLeaf = false;
HaarClassifierNodeDescriptor32 nodeLeft;
if ( tree->left[n] <= 0 )
{
Ncv32f leftVal = tree->alpha[-tree->left[n]];
ncvStat = nodeLeft.create(leftVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsLeftNodeLeaf = true;
}
else
{
Ncv32u leftNodeOffset = tree->left[n];
nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight;
if ( tree->right[n] <= 0 )
{
Ncv32f rightVal = tree->alpha[-tree->right[n]];
ncvStat = nodeRight.create(rightVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsRightNodeLeaf = true;
}
else
{
Ncv32u rightNodeOffset = tree->right[n];
nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setRightNodeDesc(nodeRight);
Ncv32u tiltedVal = feature->tilted;
haar.bNeedsTiltedII = (tiltedVal != 0);
Ncv32u featureId = 0;
for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
{
Ncv32u rectX = feature->rect[l].r.x;
Ncv32u rectY = feature->rect[l].r.y;
Ncv32u rectWidth = feature->rect[l].r.width;
Ncv32u rectHeight = feature->rect[l].r.height;
Ncv32f rectWeight = feature->rect[l].weight;
if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
break;
HaarFeature64 curFeature;
ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
curFeature.setWeight(rectWeight);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
haarFeatures.push_back(curFeature);
featureId++;
}
HaarFeatureDescriptor32 tmpFeatureDesc;
ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
curNode.setFeatureDesc(tmpFeatureDesc);
if (!nodeId)
{
//root node
haarClassifierNodes.push_back(curNode);
curMaxTreeDepth = 1;
}
else
{
//other node
h_TmpClassifierNotRootNodes.push_back(curNode);
curMaxTreeDepth++;
}
nodeId++;
}
}
curStage.setNumClassifierRootNodes(treesCount);
haarStages.push_back(curStage);
}
//fill in cascade stats
haar.NumStages = static_cast<Ncv32u>(haarStages.size());
haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());
//merge root and leaf nodes in one classifiers array
Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
haarClassifierNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
haarClassifierNodes[i].setRightNodeDesc(nodeRight);
}
for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight);
haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]);
}
return NCV_SUCCESS;
#endif
}
#define NVBIN_HAAR_SIZERESERVED 16
#define NVBIN_HAAR_VERSION 0x1
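// NVBIN layout: Ncv32u version, Ncv32u total file size, padding up to NVBIN_HAAR_SIZERESERVED bytes,
// then the cascade descriptor fields followed by the stage, node and feature arrays.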
static NCVStatus loadFromNVBIN(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
size_t readCount;
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
Ncv32u fsize;
readCount = fread(&fsize, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fseek(fp, 0, SEEK_END);
Ncv32u fsizeActual = ftell(fp);
ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR);
std::vector<unsigned char> fdata;
fdata.resize(fsize);
Ncv32u dataOffset = 0;
fseek(fp, 0, SEEK_SET);
readCount = fread(&fdata[0], fsize, 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvSize32u);
haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haarStages.resize(haar.NumStages);
haarClassifierNodes.resize(haar.NumClassifierTotalNodes);
haarFeatures.resize(haar.NumFeatures);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages);
dataOffset += szStages;
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers);
dataOffset += szClassifiers;
memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures);
dataOffset += szFeatures;
return NCV_SUCCESS;
}
NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages,
Ncv32u &numNodes, Ncv32u &numFeatures)
{
size_t readCount;
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
if (fext == "nvbin")
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET);
Ncv32u tmp;
readCount = fread(&numStages, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&tmp, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
}
else if (fext == "xml")
{
HaarClassifierCascadeDescriptor haar;
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
numStages = haar.NumStages;
numNodes = haar.NumClassifierTotalNodes;
numFeatures = haar.NumFeatures;
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
return NCV_SUCCESS;
}
NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
if (fext == "nvbin")
{
ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else if (fext == "xml")
{
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64));
memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128));
memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64));
return NCV_SUCCESS;
}
NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename,
HaarClassifierCascadeDescriptor haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
Ncv32u dataOffset = 0;
std::vector<unsigned char> fdata;
fdata.resize(szStages+szClassifiers+szFeatures+1024, 0);
//header
*(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION;
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures;
dataOffset += sizeof(Ncv32u);
*(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize;
dataOffset += sizeof(NcvSize32u);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII;
dataOffset += sizeof(NcvBool);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly;
dataOffset += sizeof(NcvBool);
memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages);
dataOffset += szStages;
memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers);
dataOffset += szClassifiers;
memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures);
dataOffset += szFeatures;
Ncv32u fsize = dataOffset;
//TODO: CRC32 here
//update header
dataOffset = sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = fsize;
FILE *fp = fopen(filename.c_str(), "wb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
fwrite(&fdata[0], fsize, 1, fp);
fclose(fp);
return NCV_SUCCESS;
}
|
73d99c0fecd67975a89cd55180edf5ca0e21c46c.hip | // !!! This is a file automatically generated by hipify!!!
#include "SimplePursuitMultiplier.h"
#include <hip/hip_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
simplePursuitMultiplierKernel(float3 position, float3 velocity, float3 *positions, float3 *velocities);
OpenSteer::SimplePursuitMultiplier::SimplePursuitMultiplier()
{
d_pursuitVelocity = NULL;
d_pursuitPosition = NULL;
threadsPerBlock = 128;
change = true;
newPosition = make_float3(0, 0, 0);
newVelocity = make_float3(0, 0, 0);
}
OpenSteer::SimplePursuitMultiplier::~SimplePursuitMultiplier() {}
void OpenSteer::SimplePursuitMultiplier::init()
{
// device memory for position data
mem_size_position = getNumberOfAgents()*sizeof(float3);
hipError_t retval = hipMalloc((void **)&d_pursuitPosition, mem_size_position);
if (retval != hipSuccess)
cout << "Error while allocating d_pursuitPosition memory: " << hipGetErrorString(retval) << endl;
// device memory for velocity data
mem_size_velocity = getNumberOfAgents()*sizeof(float3);
retval = hipMalloc((void **)&d_pursuitVelocity, mem_size_velocity);
if (retval != hipSuccess)
cout << "Error while allocating d_pursuitVelocity memory: " << hipGetErrorString(retval) << endl;
}
void OpenSteer::SimplePursuitMultiplier::run()
{
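// Launch the kernel only when the pursued target's position or velocity has changed since the last run.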
if (change) {
hipLaunchKernelGGL(( simplePursuitMultiplierKernel), dim3(gridDim()), dim3(blockDim()), 0, 0, newPosition, newVelocity, d_pursuitPosition, d_pursuitVelocity);
change = false;
}
}
void OpenSteer::SimplePursuitMultiplier::close()
{
if (d_pursuitVelocity != NULL) {
hipFree(d_pursuitVelocity);
d_pursuitVelocity = NULL;
}
if (d_pursuitPosition != NULL) {
hipFree(d_pursuitPosition);
d_pursuitPosition = NULL;
}
}
void OpenSteer::SimplePursuitMultiplier::setNewPosition(float3 newPosition)
{
this->newPosition = newPosition;
change = true;
}
void OpenSteer::SimplePursuitMultiplier::setNewVelocity(float3 newVelocity)
{
this->newVelocity= newVelocity;
change = true;
} | 73d99c0fecd67975a89cd55180edf5ca0e21c46c.cu | #include "SimplePursuitMultiplier.h"
#include <cuda_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
simplePursuitMultiplierKernel(float3 position, float3 velocity, float3 *positions, float3 *velocities);
OpenSteer::SimplePursuitMultiplier::SimplePursuitMultiplier()
{
d_pursuitVelocity = NULL;
d_pursuitPosition = NULL;
threadsPerBlock = 128;
change = true;
newPosition = make_float3(0, 0, 0);
newVelocity = make_float3(0, 0, 0);
}
OpenSteer::SimplePursuitMultiplier::~SimplePursuitMultiplier() {}
void OpenSteer::SimplePursuitMultiplier::init()
{
// device memory for position data
mem_size_position = getNumberOfAgents()*sizeof(float3);
cudaError_t retval = cudaMalloc((void **)&d_pursuitPosition, mem_size_position);
if (retval != cudaSuccess)
cout << "Error while allocating d_pursuitPosition memory: " << cudaGetErrorString(retval) << endl;
// device memory for velocity data
mem_size_velocity = getNumberOfAgents()*sizeof(float3);
retval = cudaMalloc((void **)&d_pursuitVelocity, mem_size_velocity);
if (retval != cudaSuccess)
cout << "Error while allocating d_pursuitVelocity memory: " << cudaGetErrorString(retval) << endl;
}
void OpenSteer::SimplePursuitMultiplier::run()
{
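// Launch the kernel only when the pursued target's position or velocity has changed since the last run.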
if (change) {
simplePursuitMultiplierKernel<<<gridDim(), blockDim()>>>(newPosition, newVelocity, d_pursuitPosition, d_pursuitVelocity);
change = false;
}
}
void OpenSteer::SimplePursuitMultiplier::close()
{
if (d_pursuitVelocity != NULL) {
cudaFree(d_pursuitVelocity);
d_pursuitVelocity = NULL;
}
if (d_pursuitPosition != NULL) {
cudaFree(d_pursuitPosition);
d_pursuitPosition = NULL;
}
}
void OpenSteer::SimplePursuitMultiplier::setNewPosition(float3 newPosition)
{
this->newPosition = newPosition;
change = true;
}
void OpenSteer::SimplePursuitMultiplier::setNewVelocity(float3 newVelocity)
{
this->newVelocity= newVelocity;
change = true;
} |
1f1320d13dc9bc6e8e1c9a53a68f3682177a1be7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "GPU_Math_Func.h"
void gpu_gemm(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
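// Swapping the operands (computing B^T * A^T in column-major order) yields C^T column-major, which is exactly row-major C.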
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(GPUEnv::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
void gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(GPUEnv::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
void gpu_axpy(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(GPUEnv::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
void gpu_scal(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(GPUEnv::cublas_handle(), N, &alpha, X, 1));
}
void gpu_dot(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(GPUEnv::cublas_handle(), n, x, 1, y, 1, out));
}
void gpu_scale(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(GPUEnv::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(GPUEnv::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
void gpu_set(const int N, const double alpha, double* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(double) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<double>), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
void gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <typename Dtype>
__global__ void selfAdd_kernel(const int n, const Dtype* a,
Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += a[index];
}
}
void gpu_add(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
void gpu_selfAdd(const int N, const double* a, const double* b, double* y){
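// Note: parameter b is unused; selfAdd_kernel accumulates a into y in place (y[i] += a[i]).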
hipLaunchKernelGGL(( selfAdd_kernel<double>), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
void gpu_sub(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
void gpu_mul(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
void gpu_abs(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
#if 0
void gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(curand_generator(), r, n));
}
void gpu_rng_uniform(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
gpu_add_scalar(n, a, r);
}
}
void gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(GPUEnv::curand_generator(), r, n, mu, sigma));
}
#endif
| 1f1320d13dc9bc6e8e1c9a53a68f3682177a1be7.cu | #include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "GPU_Math_Func.h"
void gpu_gemm(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
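// Swapping the operands (computing B^T * A^T in column-major order) yields C^T column-major, which is exactly row-major C.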
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(GPUEnv::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
void gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(GPUEnv::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
void gpu_axpy(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(GPUEnv::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
void gpu_scal(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(GPUEnv::cublas_handle(), N, &alpha, X, 1));
}
void gpu_dot(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(GPUEnv::cublas_handle(), n, x, 1, y, 1, out));
}
void gpu_scale(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(GPUEnv::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(GPUEnv::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
void gpu_set(const int N, const double alpha, double* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(double) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<double><<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
void gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <typename Dtype>
__global__ void selfAdd_kernel(const int n, const Dtype* a,
Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += a[index];
}
}
void gpu_add(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, a, b, y);
}
void gpu_selfAdd(const int N, const double* a, const double* b, double* y){
selfAdd_kernel<double><<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
void gpu_sub(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
void gpu_mul(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
void gpu_abs(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, a, y);
}
#if 0
void gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(curand_generator(), r, n));
}
void gpu_rng_uniform(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
gpu_add_scalar(n, a, r);
}
}
void gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(GPUEnv::curand_generator(), r, n, mu, sigma));
}
#endif
|
dc9f95e886de3e361e53c5b66d8b96ff6618d88c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_runtime.h>
#include "unary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/unary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
#define OP(name, expr) \
template <typename T> \
struct OP_##name { \
__device__ __inline__ T operator()(const T& a) const { \
return expr; \
} \
};
#define UNARY_ELEMENTWISE_IMPL(name) \
UNARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
UnaryElementWiseImpl(input_data, \
output_data, \
OP_##name<T>(), \
count); \
}
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, T) \
template void Impl_##name<T>(const T* input_data, T* output_data, size_t count);
#define UNARY_OP_NAME_EXPR(name, expr) \
OP(name, expr) \
UNARY_ELEMENTWISE_IMPL(name)
UNARY_OPS()
#undef UNARY_OP_NAME_EXPR
// the postfix of the name indicates the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(name) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, half) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, float) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, double)
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL_CSILHFD(name) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int8_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int16_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int32_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int64_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(name)
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL_BWUZCSILHFD(name) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint8_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint16_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint32_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint64_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_CSILHFD(name)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_BWUZCSILHFD(Abs)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_CSILHFD(Neg)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Floor)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Ceil)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Reciprocal)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Sqrt)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Log)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Exp)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Erf)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(Not, bool)
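// For example, SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Sqrt) above expands to the explicit
// instantiations Impl_Sqrt<half>, Impl_Sqrt<float> and Impl_Sqrt<double>, while the CSILHFD
// variant used for Neg adds the int8_t/int16_t/int32_t/int64_t instantiations on top of those.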
// When casting, half needs to be converted via float type from most other types
template <typename T>
struct ViaTypeMap {
typedef T ViaT;
};
template <>
struct ViaTypeMap<half> {
typedef float ViaT;
};
template <typename InT, typename OutT>
struct OP_Cast {
__device__ __inline__ OutT operator()(const InT& a) const {
const bool any_float16 = std::is_same<half, InT>::value || std::is_same<half, OutT>::value;
typedef typename std::conditional<any_float16, half, OutT>::type T;
typedef typename ViaTypeMap<T>::ViaT ViaT;
return (OutT)((ViaT)a);
}
};
template <typename InT, typename OutT>
void Impl_Cast(
const InT* input_data,
OutT* output_data,
size_t count) {
UnaryElementWiseImpl(input_data,
output_data,
OP_Cast<InT, OutT>(),
count);
}
#define SPECIALIZED_CAST_IMPL2(InT, OutT) \
template void Impl_Cast<InT, OutT>(const InT* input_data, OutT* output_data, size_t count);
#define SPECIALIZED_CAST_FROM(T) \
SPECIALIZED_CAST_IMPL2(T, half) \
SPECIALIZED_CAST_IMPL2(T, float) \
SPECIALIZED_CAST_IMPL2(T, double) \
SPECIALIZED_CAST_IMPL2(T, int8_t) \
SPECIALIZED_CAST_IMPL2(T, int16_t) \
SPECIALIZED_CAST_IMPL2(T, int32_t) \
SPECIALIZED_CAST_IMPL2(T, int64_t) \
SPECIALIZED_CAST_IMPL2(T, uint8_t) \
SPECIALIZED_CAST_IMPL2(T, uint16_t) \
SPECIALIZED_CAST_IMPL2(T, uint32_t) \
SPECIALIZED_CAST_IMPL2(T, uint64_t) \
SPECIALIZED_CAST_IMPL2(T, bool)
SPECIALIZED_CAST_FROM(half)
SPECIALIZED_CAST_FROM(float)
SPECIALIZED_CAST_FROM(double)
SPECIALIZED_CAST_FROM(int8_t)
SPECIALIZED_CAST_FROM(int16_t)
SPECIALIZED_CAST_FROM(int32_t)
SPECIALIZED_CAST_FROM(int64_t)
SPECIALIZED_CAST_FROM(uint8_t)
SPECIALIZED_CAST_FROM(uint16_t)
SPECIALIZED_CAST_FROM(uint32_t)
SPECIALIZED_CAST_FROM(uint64_t)
SPECIALIZED_CAST_FROM(bool)
} // namespace cuda
} // namespace onnxruntime
| dc9f95e886de3e361e53c5b66d8b96ff6618d88c.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include "unary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/unary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
#define OP(name, expr) \
template <typename T> \
struct OP_##name { \
__device__ __inline__ T operator()(const T& a) const { \
return expr; \
} \
};
#define UNARY_ELEMENTWISE_IMPL(name) \
UNARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
UnaryElementWiseImpl(input_data, \
output_data, \
OP_##name<T>(), \
count); \
}
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, T) \
template void Impl_##name<T>(const T* input_data, T* output_data, size_t count);
#define UNARY_OP_NAME_EXPR(name, expr) \
OP(name, expr) \
UNARY_ELEMENTWISE_IMPL(name)
UNARY_OPS()
#undef UNARY_OP_NAME_EXPR
// the postfix of the name indicates the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(name) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, half) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, float) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, double)
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL_CSILHFD(name) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int8_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int16_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int32_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, int64_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(name)
#define SPECIALIZED_UNARY_ELEMENTWISE_IMPL_BWUZCSILHFD(name) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint8_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint16_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint32_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(name, uint64_t) \
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_CSILHFD(name)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_BWUZCSILHFD(Abs)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_CSILHFD(Neg)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Floor)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Ceil)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Reciprocal)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Sqrt)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Log)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Exp)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL_HFD(Erf)
SPECIALIZED_UNARY_ELEMENTWISE_IMPL(Not, bool)
// When casting, half needs to be converted via float type from most other types
template <typename T>
struct ViaTypeMap {
typedef T ViaT;
};
template <>
struct ViaTypeMap<half> {
typedef float ViaT;
};
template <typename InT, typename OutT>
struct OP_Cast {
__device__ __inline__ OutT operator()(const InT& a) const {
const bool any_float16 = std::is_same<half, InT>::value || std::is_same<half, OutT>::value;
typedef typename std::conditional<any_float16, half, OutT>::type T;
typedef typename ViaTypeMap<T>::ViaT ViaT;
return (OutT)((ViaT)a);
}
};
template <typename InT, typename OutT>
void Impl_Cast(
const InT* input_data,
OutT* output_data,
size_t count) {
UnaryElementWiseImpl(input_data,
output_data,
OP_Cast<InT, OutT>(),
count);
}
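// Example of the conversion path above: for Impl_Cast<int32_t, half>, any_float16 is true, so
// T resolves to half and ViaT to float, and the cast is performed as (half)((float)a). When
// neither side is half (e.g. Impl_Cast<float, int32_t>), T is simply OutT and the value is
// converted directly.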
#define SPECIALIZED_CAST_IMPL2(InT, OutT) \
template void Impl_Cast<InT, OutT>(const InT* input_data, OutT* output_data, size_t count);
#define SPECIALIZED_CAST_FROM(T) \
SPECIALIZED_CAST_IMPL2(T, half) \
SPECIALIZED_CAST_IMPL2(T, float) \
SPECIALIZED_CAST_IMPL2(T, double) \
SPECIALIZED_CAST_IMPL2(T, int8_t) \
SPECIALIZED_CAST_IMPL2(T, int16_t) \
SPECIALIZED_CAST_IMPL2(T, int32_t) \
SPECIALIZED_CAST_IMPL2(T, int64_t) \
SPECIALIZED_CAST_IMPL2(T, uint8_t) \
SPECIALIZED_CAST_IMPL2(T, uint16_t) \
SPECIALIZED_CAST_IMPL2(T, uint32_t) \
SPECIALIZED_CAST_IMPL2(T, uint64_t) \
SPECIALIZED_CAST_IMPL2(T, bool)
SPECIALIZED_CAST_FROM(half)
SPECIALIZED_CAST_FROM(float)
SPECIALIZED_CAST_FROM(double)
SPECIALIZED_CAST_FROM(int8_t)
SPECIALIZED_CAST_FROM(int16_t)
SPECIALIZED_CAST_FROM(int32_t)
SPECIALIZED_CAST_FROM(int64_t)
SPECIALIZED_CAST_FROM(uint8_t)
SPECIALIZED_CAST_FROM(uint16_t)
SPECIALIZED_CAST_FROM(uint32_t)
SPECIALIZED_CAST_FROM(uint64_t)
SPECIALIZED_CAST_FROM(bool)
} // namespace cuda
} // namespace onnxruntime
|
3147817946a6fec05536427adacf5cd9a915bb02.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <functional>
#include <math.h>
#include <memory>
#include <random>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <cblas.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cudnn.h>
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
int fsize(int fd) {
struct stat stat;
int res = fstat(fd, &stat);
return stat.st_size;
}
int printll(char *s) {
while (*s != '\0' && *s != '\n' && *s != ',' && *s != '\t') {
putchar(*s++);
}
return 0;
}
long hash(char *str0, int len) {
unsigned char *str = (unsigned char *)str0;
unsigned long hash = 5381;
int c;
while ((c = *str++) && len--)
hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
return hash;
}
long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; //
void *mallocBase = calloc(HEAP_SIZE_CPU, 1);
void *mallocAddr = mallocBase;
void *waterMark = mallocBase;
void *myMalloc(size_t bytes) {
void *res = mallocAddr;
mallocAddr = (void *)((char *)mallocAddr + bytes);
if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU)
fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n");
return res;
}
long HEAP_SIZE = 4294967304; // this is for GPU
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) {
long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec);
result->tv_sec = diff / 1000000;
result->tv_usec = diff % 1000000;
return (diff < 0);
}
#define CUDA_CALL(f) { \
hipError_t err = (f); \
if (err != hipSuccess) { \
fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \
hipGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define CUBLAS_CALL(f) { \
hipblasStatus_t stat = (f); \
if (stat != HIPBLAS_STATUS_SUCCESS) { \
fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \
stat, __FILE__, __LINE__); \
exit(stat); \
} \
}
void *gpuMallocBase;
void *gpuMallocAddr;
// Log2 of the allocation alignment: requests are rounded up to a multiple of (1 << N) = 16 bytes.
constexpr int N = 4; // 16
void *myGpuMalloc(size_t bytes) {
bytes = ((bytes + (1 << N) - 1) >> N) << N;
void *res = gpuMallocAddr;
gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes);
if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE)
fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n");
return res;
}
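// For example, with N = 4 a request of myGpuMalloc(10) is rounded up to ((10 + 15) >> 4) << 4
// = 16 bytes, so every pointer returned by the bump allocator stays 16-byte aligned.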
template <typename T>
__global__ void arrayUpdate(T *data, int index, T value) {
data[index] = value;
}
__global__ void arrayFill(float *data, float value) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
data[tid] = value;
}
__global__ void arrayFill_greg(float* data, float value, int size) {
int stride = gridDim.x * blockDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < size; i += stride) data[i] = value;
}
__global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < size; i += stride) {
out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? max_val : in[i]);
}
}
__global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < size; i += stride) {
if (inplace) {
if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0;
} else {
if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i];
}
}
}
__global__ void nllLoss(float *x, int x_stride, float *y, int* target) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
y[tid] = -1 * x[offset];
}
__global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
xGrad[offset] += -1 * yGrad[tid];
}
// only for 4D tensor in and 3D tensor out
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement,
float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
}
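// Example of the index mapping above: with dim == 1 and a 4D input gradient, the element at
// (n, c, h, w) accumulates out[n * outStride0 + h * outStride1 + w * outStride2], i.e. the
// summed-out dimension c is simply dropped when indexing the 3D output gradient.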
//following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49
static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3,
int outputStride0, int outputStride1, int outputStride2, int outputStride3,
const int dimSize, const int concatDim, int linearIndex) {
int offset = 0;
int curDimSize = 3 == concatDim ? dimSize : outputSize3;
int nextDimIndex = linearIndex / curDimSize;
int curDimIndex = linearIndex - curDimSize * nextDimIndex;
int curDimOffset = curDimIndex * outputStride3;
offset += curDimOffset;
linearIndex = nextDimIndex;
curDimSize = 2 == concatDim ? dimSize : outputSize2;
nextDimIndex = linearIndex / curDimSize;
curDimIndex = linearIndex - curDimSize * nextDimIndex;
curDimOffset = curDimIndex * outputStride2;
offset += curDimOffset;
linearIndex = nextDimIndex;
curDimSize = 1 == concatDim ? dimSize : outputSize1;
nextDimIndex = linearIndex / curDimSize;
curDimIndex = linearIndex - curDimSize * nextDimIndex;
curDimOffset = curDimIndex * outputStride1;
offset += curDimOffset;
linearIndex = nextDimIndex;
return offset + linearIndex * outputStride0;
// for (int i = 3; i >= 1; i--) {
// int curDimSize = i == concatDim ? dimSize : outputSize[i];
// int nextDimIndex = linearIndex / curDimSize;
// int curDimIndex = linearIndex - curDimSize * nextDimIndex;
// int curDimOffset = curDimIndex * outputStride[i];
// offset += curDimOffset;
// linearIndex = nextDimIndex;
// }
// return offset + linearIndex * outputStride[0];
}
// TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1
__global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1,
float* in2, int dimSize2, int nElement2,
float* out, int concatDim,
int outSize0, int outSize1, int outSize2, int outSize3,
int outStride0, int outStride1, int outStride2, int outStride3) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nElement = blockIdx.y == 0 ? nElement1 : nElement2;
if (tid >= nElement) return;
float* data = blockIdx.y == 0 ? in1 : in2;
int offset = blockIdx.y == 0 ? 0 : dimSize1;
int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2;
int dataOffset = offset * outStride1;
int stride = gridDim.x * blockDim.x;
while (tid < nElement) {
int elementOffset = compute(outSize0, outSize1, outSize2, outSize3,
outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid);
out[dataOffset + elementOffset] = data[tid];
tid += stride;
}
}
// TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1
__global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1,
float* in2, int dimSize2, int nElement2,
float* out, int concatDim,
int outSize0, int outSize1, int outSize2, int outSize3,
int outStride0, int outStride1, int outStride2, int outStride3) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nElement = blockIdx.y == 0 ? nElement1 : nElement2;
if (tid >= nElement) return;
float* data = blockIdx.y == 0 ? in1 : in2;
int offset = blockIdx.y == 0 ? 0 : dimSize1;
int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2;
int dataOffset = offset * outStride1;
int stride = gridDim.x * blockDim.x;
while (tid < nElement) {
int elementOffset = compute(outSize0, outSize1, outSize2, outSize3,
outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid);
data[tid] += out[dataOffset + elementOffset];
tid += stride;
}
}
__global__ void concat2D_1D_loop(float* in1, float* in2, float* out, int sizeLow, int sizeHigh, int sizeDim1, int sizeDim2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= sizeLow) return;
if (blockIdx.y < sizeHigh) { // the first input
int index_out = tid + blockIdx.y * sizeLow * (sizeDim1 + sizeDim2);
int index_in1 = tid + blockIdx.y * sizeLow * sizeDim1;
for (int i = 0; i < sizeDim1; i++) {
out[index_out] = in1[index_in1];
index_out += sizeLow; index_in1 += sizeLow;
}
} else { // the second input
int index_out = tid + (blockIdx.y - sizeHigh) * sizeLow * (sizeDim1 + sizeDim2) + sizeLow * sizeDim1;
int index_in2 = tid + (blockIdx.y - sizeHigh) * sizeLow * sizeDim2;
for (int i = 0; i < sizeDim2; i++) {
out[index_out] = in2[index_in2];
index_out += sizeLow; index_in2 += sizeLow;
}
}
}
__global__ void concat2D_1D_loop_grad(float* in1, float* in2, float* out, int sizeLow, int sizeHigh, int sizeDim1, int sizeDim2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= sizeLow) return;
if (blockIdx.y < sizeHigh) { // the first input
int index_out = tid + blockIdx.y * sizeLow * (sizeDim1 + sizeDim2);
int index_in1 = tid + blockIdx.y * sizeLow * sizeDim1;
for (int i = 0; i < sizeDim1; i++) {
in1[index_in1] += out[index_out];
index_out += sizeLow; index_in1 += sizeLow;
}
} else { // the second input
int index_out = tid + (blockIdx.y - sizeHigh) * sizeLow * (sizeDim1 + sizeDim2) + sizeLow * sizeDim1;
int index_in2 = tid + (blockIdx.y - sizeHigh) * sizeLow * sizeDim2;
for (int i = 0; i < sizeDim2; i++) {
in2[index_in2] += out[index_out];
index_out += sizeLow; index_in2 += sizeLow;
}
}
}
__global__ void concat2D_1D(float* in1, float* in2, float* out, int dim2, int bound) {
int tid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (blockIdx.x < bound * dim2) {
int subid = blockIdx.y * bound * dim2 * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
out[tid] = in1[subid];
} else {
int subid = blockIdx.y * (gridDim.x - bound * dim2) * blockDim.x + (blockIdx.x - bound * dim2) * blockDim.x + threadIdx.x;
out[tid] = in2[subid];
}
}
__global__ void concat2D_1D_grad(float* in1, float* in2, float* out, int dim2, int bound) {
int tid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (blockIdx.x < bound * dim2) {
int subid = blockIdx.y * bound * dim2 * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
in1[subid] += out[tid];
} else {
int subid = blockIdx.y * (gridDim.x - bound * dim2) * blockDim.x + (blockIdx.x - bound * dim2) * blockDim.x + threadIdx.x;
in2[subid] += out[tid];
}
}
__global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
if (d[tid] > clip) d[tid] = clip;
if (d[tid] < -clip) d[tid] = -clip;
m[tid] += d[tid] * d[tid];
x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001);
d[tid] = 0;
}
}
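// In other words, each parameter follows the AdaGrad update with gradient clipping: d is
// clamped to [-clip, clip], m accumulates d^2, x is updated by -lr * d / sqrt(m + 1e-8),
// and the gradient buffer is zeroed for the next iteration.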
__global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] * in2[tid];
}
__global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] += in1[tid] * in2[tid];
}
__global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] + in2[tid];
}
__global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] - in2[tid];
}
__global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] / in2[tid];
}
__global__ void elementwise_1D_1D_exp(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = exp(in[tid]);
}
__global__ void elementwise_1D_1D_log(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = log(in[tid]);
}
__global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = sqrt(in[tid]);
}
__global__ void elementwise_1D_1D_square(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in[tid] * in[tid];
}
__global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] * out_x[tid];
}
__global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] / in_x[tid];
}
__global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2;
}
__global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid];
}
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh
// Result of div/mod operation stored together.
template <typename Value>
struct DivMod {
Value div, mod;
__host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { }
};
// Base case: we only have an implementation for uint32_t for now. For
// everything else, we use plain division.
template <typename Value>
struct IntDivider {
IntDivider() { } // Dummy constructor for arrays.
IntDivider(Value d) : divisor(d) { }
__host__ __device__ inline Value div(Value n) const { return n / divisor; }
__host__ __device__ inline Value mod(Value n) const { return n % divisor; }
__host__ __device__ inline DivMod<Value> divmod(Value n) const {
return DivMod<Value>(n / divisor, n % divisor);
}
Value divisor;
};
// Implement fast integer division.
template <>
struct IntDivider<unsigned int> {
static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
IntDivider() { } // Dummy constructor for arrays.
IntDivider(unsigned int d) : divisor(d) {
assert(divisor >= 1 && divisor <= INT32_MAX);
// TODO: gcc/clang has __builtin_clz() but it's not portable.
for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
uint64_t one = 1;
uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
m1 = magic;
assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits.
}
__host__ __device__ inline unsigned int div(unsigned int n) const {
#ifdef __CUDA_ARCH__
// 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and
// 'm1'.
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
#else
// Using uint64_t so that the addition does not overflow.
uint64_t t = ((uint64_t) n * m1) >> 32;
return (t + n) >> shift;
#endif
}
__host__ __device__ inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
__host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor; // d above.
unsigned int m1; // Magic number: m' above.
unsigned int shift; // Shift amounts.
};
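// Worked example of the magic-number division above, assuming divisor = 3: shift = 2 (the
// smallest shift with (1 << shift) >= 3) and m1 = ((1ull << 32) * ((1 << 2) - 3)) / 3 + 1
// = 1431655766. Then div(7) computes t = __umulhi(7, m1) = 2 and returns (2 + 7) >> 2 = 2,
// matching 7 / 3.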
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh
/// OffsetCalculator calculates the offset in bytes of a linear index for NARGS
/// operands that share the same shape, but may have different strides.
template <int NARGS>
struct OffsetCalculator {
static constexpr int MAX_DIMS = 25;
// The offset for each argument (in bytes). Wrapper around fixed-size array.
struct offsets_t {
__host__ __device__ uint32_t& operator[](int idx) {
return values[idx];
}
uint32_t values[NARGS];
};
// OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) {
OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) {
for (int i = 0; i < MAX_DIMS; ++i) {
if (i < dims) {
sizes_[i] = IntDivider<uint32_t>(sizes[i]);
} else {
sizes_[i] = IntDivider<uint32_t>(1);
}
for (int arg = 0; arg < NARGS; arg++) {
strides_[i][arg] = i < dims ? strides[arg][i] : 0;
}
}
}
__host__ __device__ offsets_t get(uint32_t linear_idx) const {
offsets_t offsets;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] = 0;
}
#pragma unroll
for (int dim = 0; dim < MAX_DIMS; ++dim) {
if (dim == dims) {
break;
}
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] += divmod.mod * strides_[dim][arg];
}
}
return offsets;
}
void print() {
for (auto i = 1; i < 128; i++) {
auto offsets = get(i);
printf("offsets[%d]: ", i);
for (auto arg = 0; arg < NARGS; arg++) {
printf("%d ", offsets[arg]);
}
printf("\n");
}
}
int dims;
IntDivider<uint32_t> sizes_[MAX_DIMS];
uint32_t strides_[MAX_DIMS][NARGS];
};
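// Worked example, assuming element strides as used by the kernels below: with dims = 2,
// sizes = {4, 3} (innermost dimension first) and two operands with strides {1, 4} and {3, 1},
// get(7) first splits 7 into (mod 3, div 1) over the inner size 4, then (mod 1, div 0) over
// size 3, giving offsets {3*1 + 1*4, 3*3 + 1*1} = {7, 10}.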
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh
template<int nt, int vt, typename func_t>
__launch_bounds__(nt, 4)
__global__ void elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
int idx = nv * blockIdx.x + tid;
#pragma unroll
for (int i = 0; i < vt; i++) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
hipLaunchKernelGGL(( elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, 0, N, f);
}
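// For example, with N = 1000 and the <128, 4> configuration used below, each block covers
// nv = 512 consecutive indices, so the grid is (1000 + 511) / 512 = 2 blocks and every thread
// processes up to vt = 4 elements spaced nt = 128 apart; the idx < N check discards the 24
// out-of-range indices in the last block.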
template<typename func_t>
void gpu_unary_kernel(float *res, float *x,
int32_t resRank, const int32_t resScalarCount,
const int32_t* resShape, const int32_t* const* strides,
const func_t& f) {
OffsetCalculator<2> calc(resRank, resShape, strides);
launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
auto offsets = calc.get(idx);
float* out = &res[offsets[0]];
float* in = &x[offsets[1]];
*out = f(*in);
});
}
template<typename func_t>
void gpu_binary_kernel(float *res, float *x, float *y,
int32_t resRank, const int32_t resScalarCount,
const int32_t* resShape, const int32_t* const* strides,
const func_t& f) {
OffsetCalculator<3> calc(resRank, resShape, strides);
launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
auto offsets = calc.get(idx);
float* out = &res[offsets[0]];
float* in1 = &x[offsets[1]];
float* in2 = &y[offsets[2]];
*out = f(*in1, *in2);
});
}
#define CUDNN_CALL(f) { \
cudnnStatus_t stat = (f); \
if (stat != CUDNN_STATUS_SUCCESS) { \
fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \
stat, __FILE__, __LINE__); \
exit(stat); \
} \
}
void Snippet(char *);
std::random_device rd{};
std::mt19937 gen{rd()};
std::normal_distribution<> d{0, 0.01};
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: query <filename>\n");
return 0;
}
Snippet(argv[1]);
return 0;
}
/*****************************************
Emitting C Generated Code
*******************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
void Snippet(char* x0) {
// Backend setup.
hipblasHandle_t cublasHandle;
CUBLAS_CALL(hipblasCreate(&cublasHandle));
CUDA_CALL(hipMalloc(&gpuMallocBase, HEAP_SIZE));
CUDA_CALL(hipMemset(gpuMallocBase, 0, HEAP_SIZE));
gpuMallocAddr = gpuMallocBase;
cudnnHandle_t cudnnHandle;
CUDNN_CALL(cudnnCreate(&cudnnHandle));
// Tensor 'toGPU' invocation.
float* x275 = (float*)myGpuMalloc(262144 * sizeof(float));
int32_t x4 = open("/u/data/u99/wang603/TiarkMlEnv/Lantern/src/out/PLDI19evaluation/resnet50/resnet50.onnx.bin",0);
int32_t x5 = fsize(x4);
float* x6 = (float*)mmap(0, x5, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x4, 0);
float* x7 = x6+5205440;
CUDA_CALL(hipMemcpy(x275, x7, 262144 * sizeof(float), hipMemcpyHostToDevice));
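// Each of the following 'toGPU' blocks repeats the same pattern: allocate a device buffer from
// the bump allocator, then copy one slice of the mmap'd parameter file (x6) at a fixed offset
// into it with hipMemcpy.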
// Tensor 'toGPU' invocation.
float* x278 = (float*)myGpuMalloc(256 * sizeof(float));
float* x8 = x6+148672;
CUDA_CALL(hipMemcpy(x278, x8, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x281 = (float*)myGpuMalloc(128 * sizeof(float));
float* x9 = x6+816064;
CUDA_CALL(hipMemcpy(x281, x9, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x284 = (float*)myGpuMalloc(128 * sizeof(float));
float* x10 = x6+950080;
CUDA_CALL(hipMemcpy(x284, x10, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x287 = (float*)myGpuMalloc(64 * sizeof(float));
float* x11 = x6+94784;
CUDA_CALL(hipMemcpy(x287, x11, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x290 = (float*)myGpuMalloc(32768 * sizeof(float));
float* x12 = x6+220608;
CUDA_CALL(hipMemcpy(x290, x12, 32768 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x293 = (float*)myGpuMalloc(512 * sizeof(float));
float* x13 = x6+22495680;
CUDA_CALL(hipMemcpy(x293, x13, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x296 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x14 = x6+2964928;
CUDA_CALL(hipMemcpy(x296, x14, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x299 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x15 = x6+4348352;
CUDA_CALL(hipMemcpy(x299, x15, 589824 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x302 = (float*)myGpuMalloc(512 * sizeof(float));
float* x16 = x6+20133312;
CUDA_CALL(hipMemcpy(x302, x16, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x305 = (float*)myGpuMalloc(256 * sizeof(float));
float* x17 = x6+2169536;
CUDA_CALL(hipMemcpy(x305, x17, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x308 = (float*)myGpuMalloc(128 * sizeof(float));
float* x18 = x6+668224;
CUDA_CALL(hipMemcpy(x308, x18, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x311 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x19 = x6+2432448;
CUDA_CALL(hipMemcpy(x311, x19, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x314 = (float*)myGpuMalloc(512 * sizeof(float));
float* x20 = x6+1446336;
CUDA_CALL(hipMemcpy(x314, x20, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x317 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x21 = x6+4081088;
CUDA_CALL(hipMemcpy(x317, x21, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x320 = (float*)myGpuMalloc(256 * sizeof(float));
float* x22 = x6+1578688;
CUDA_CALL(hipMemcpy(x320, x22, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x323 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x23 = x6+6325696;
CUDA_CALL(hipMemcpy(x323, x23, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x326 = (float*)myGpuMalloc(512 * sizeof(float));
float* x24 = x6+602048;
CUDA_CALL(hipMemcpy(x326, x24, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x329 = (float*)myGpuMalloc(64 * sizeof(float));
float* x25 = x6+165888;
CUDA_CALL(hipMemcpy(x329, x25, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x332 = (float*)myGpuMalloc(512 * sizeof(float));
float* x26 = x6+1164736;
CUDA_CALL(hipMemcpy(x332, x26, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x335 = (float*)myGpuMalloc(64 * sizeof(float));
float* x27 = x6+6080;
CUDA_CALL(hipMemcpy(x335, x27, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x338 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x28 = x6+253888;
CUDA_CALL(hipMemcpy(x338, x28, 147456 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x341 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x29 = x6+20135360;
CUDA_CALL(hipMemcpy(x341, x29, 2359296 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x344 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x30 = x6+2960832;
CUDA_CALL(hipMemcpy(x344, x30, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x347 = (float*)myGpuMalloc(256 * sizeof(float));
float* x31 = x6+3227072;
CUDA_CALL(hipMemcpy(x347, x31, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x350 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x32 = x6+3228096;
CUDA_CALL(hipMemcpy(x350, x32, 589824 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x353 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x33 = x6+43456;
CUDA_CALL(hipMemcpy(x353, x33, 16384 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x356 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x34 = x6+22496704;
CUDA_CALL(hipMemcpy(x356, x34, 1048576 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x359 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x35 = x6+9092544;
CUDA_CALL(hipMemcpy(x359, x35, 2359296 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x362 = (float*)myGpuMalloc(128 * sizeof(float));
float* x36 = x6+816320;
CUDA_CALL(hipMemcpy(x362, x36, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x365 = (float*)myGpuMalloc(256 * sizeof(float));
float* x37 = x6+60608;
CUDA_CALL(hipMemcpy(x365, x37, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x368 = (float*)myGpuMalloc(256 * sizeof(float));
float* x38 = x6+219584;
CUDA_CALL(hipMemcpy(x368, x38, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x371 = (float*)myGpuMalloc(128 * sizeof(float));
float* x39 = x6+1379392;
CUDA_CALL(hipMemcpy(x371, x39, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x374 = (float*)myGpuMalloc(128 * sizeof(float));
float* x40 = x6+1231296;
CUDA_CALL(hipMemcpy(x374, x40, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x377 = (float*)myGpuMalloc(64 * sizeof(float));
float* x41 = x6+1856;
CUDA_CALL(hipMemcpy(x377, x41, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x380 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x42 = x6+1098176;
CUDA_CALL(hipMemcpy(x380, x42, 65536 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x383 = (float*)myGpuMalloc(512 * sizeof(float));
float* x43 = x6+601536;
CUDA_CALL(hipMemcpy(x383, x43, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x386 = (float*)myGpuMalloc(128 * sizeof(float));
float* x44 = x6+401728;
CUDA_CALL(hipMemcpy(x386, x44, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x389 = (float*)myGpuMalloc(64 * sizeof(float));
float* x45 = x6+131904;
CUDA_CALL(hipMemcpy(x389, x45, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x392 = (float*)myGpuMalloc(128 * sizeof(float));
float* x46 = x6+949696;
CUDA_CALL(hipMemcpy(x392, x46, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x395 = (float*)myGpuMalloc(512 * sizeof(float));
float* x47 = x6+15664576;
CUDA_CALL(hipMemcpy(x395, x47, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x398 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x48 = x6+18027968;
CUDA_CALL(hipMemcpy(x398, x48, 1048576 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x401 = (float*)myGpuMalloc(10 * sizeof(float));
float* x49 = x6+23573952;
CUDA_CALL(hipMemcpy(x401, x49, 10 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x404 = (float*)myGpuMalloc(64 * sizeof(float));
float* x50 = x6+43264;
CUDA_CALL(hipMemcpy(x404, x50, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x407 = (float*)myGpuMalloc(512 * sizeof(float));
float* x51 = x6+11453376;
CUDA_CALL(hipMemcpy(x407, x51, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x410 = (float*)myGpuMalloc(64 * sizeof(float));
float* x52 = x6+6272;
CUDA_CALL(hipMemcpy(x410, x52, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x413 = (float*)myGpuMalloc(512 * sizeof(float));
float* x53 = x6+882112;
CUDA_CALL(hipMemcpy(x413, x53, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x416 = (float*)myGpuMalloc(64 * sizeof(float));
float* x54 = x6+6144;
CUDA_CALL(hipMemcpy(x416, x54, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x419 = (float*)myGpuMalloc(512 * sizeof(float));
float* x55 = x6+1445824;
CUDA_CALL(hipMemcpy(x419, x55, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x422 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x56 = x6+1379776;
CUDA_CALL(hipMemcpy(x422, x56, 65536 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x425 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x57 = x6+3818944;
CUDA_CALL(hipMemcpy(x425, x57, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x428 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x58 = x6+5202368;
CUDA_CALL(hipMemcpy(x428, x58, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x431 = (float*)myGpuMalloc(256 * sizeof(float));
float* x59 = x6+148416;
CUDA_CALL(hipMemcpy(x431, x59, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x434 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x60 = x6+7441856;
CUDA_CALL(hipMemcpy(x434, x60, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x437 = (float*)myGpuMalloc(64 * sizeof(float));
float* x61 = x6+94720;
CUDA_CALL(hipMemcpy(x437, x61, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x440 = (float*)myGpuMalloc(128 * sizeof(float));
float* x62 = x6+1097792;
CUDA_CALL(hipMemcpy(x440, x62, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x443 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x63 = x6+12504512;
CUDA_CALL(hipMemcpy(x443, x63, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x446 = (float*)myGpuMalloc(256 * sizeof(float));
float* x64 = x6+4938944;
CUDA_CALL(hipMemcpy(x446, x64, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x449 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x65 = x6+14611904;
CUDA_CALL(hipMemcpy(x449, x65, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x452 = (float*)myGpuMalloc(512 * sizeof(float));
float* x66 = x6+15666112;
CUDA_CALL(hipMemcpy(x452, x66, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x455 = (float*)myGpuMalloc(512 * sizeof(float));
float* x67 = x6+18026432;
CUDA_CALL(hipMemcpy(x455, x67, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x458 = (float*)myGpuMalloc(512 * sizeof(float));
float* x68 = x6+9091520;
CUDA_CALL(hipMemcpy(x458, x68, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x461 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x69 = x6+19080640;
CUDA_CALL(hipMemcpy(x461, x69, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x464 = (float*)myGpuMalloc(256 * sizeof(float));
float* x70 = x6+6588608;
CUDA_CALL(hipMemcpy(x464, x70, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x467 = (float*)myGpuMalloc(256 * sizeof(float));
float* x71 = x6+8299456;
CUDA_CALL(hipMemcpy(x467, x71, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x470 = (float*)myGpuMalloc(256 * sizeof(float));
float* x72 = x6+60352;
CUDA_CALL(hipMemcpy(x470, x72, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x473 = (float*)myGpuMalloc(64 * sizeof(float));
float* x73 = x6+202944;
CUDA_CALL(hipMemcpy(x473, x73, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x476 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x74 = x6+166080;
CUDA_CALL(hipMemcpy(x476, x74, 36864 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x479 = (float*)myGpuMalloc(256 * sizeof(float));
float* x75 = x6+6058432;
CUDA_CALL(hipMemcpy(x479, x75, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x482 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x76 = x6+2436544;
CUDA_CALL(hipMemcpy(x482, x76, 524288 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x485 = (float*)myGpuMalloc(256 * sizeof(float));
float* x77 = x6+77248;
CUDA_CALL(hipMemcpy(x485, x77, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x488 = (float*)myGpuMalloc(256 * sizeof(float));
float* x78 = x6+6587840;
CUDA_CALL(hipMemcpy(x488, x78, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x491 = (float*)myGpuMalloc(512 * sizeof(float));
float* x79 = x6+20133824;
CUDA_CALL(hipMemcpy(x491, x79, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x494 = (float*)myGpuMalloc(128 * sizeof(float));
float* x80 = x6+1379264;
CUDA_CALL(hipMemcpy(x494, x80, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x497 = (float*)myGpuMalloc(256 * sizeof(float));
float* x81 = x6+7708608;
CUDA_CALL(hipMemcpy(x497, x81, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x500 = (float*)myGpuMalloc(64 * sizeof(float));
float* x82 = x6+165824;
CUDA_CALL(hipMemcpy(x500, x82, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x503 = (float*)myGpuMalloc(512 * sizeof(float));
float* x83 = x6+1164224;
CUDA_CALL(hipMemcpy(x503, x83, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x506 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x84 = x6+94912;
CUDA_CALL(hipMemcpy(x506, x84, 36864 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x509 = (float*)myGpuMalloc(128 * sizeof(float));
float* x85 = x6+253376;
CUDA_CALL(hipMemcpy(x509, x85, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x512 = (float*)myGpuMalloc(256 * sizeof(float));
float* x86 = x6+7708096;
CUDA_CALL(hipMemcpy(x512, x86, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x515 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x87 = x6+2962880;
CUDA_CALL(hipMemcpy(x515, x87, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x518 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x88 = x6+203200;
CUDA_CALL(hipMemcpy(x518, x88, 16384 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x521 = (float*)myGpuMalloc(512 * sizeof(float));
float* x89 = x6+883648;
CUDA_CALL(hipMemcpy(x521, x89, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x524 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x90 = x6+6059456;
CUDA_CALL(hipMemcpy(x524, x90, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x527 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x91 = x6+6336;
CUDA_CALL(hipMemcpy(x527, x91, 36864 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x530 = (float*)myGpuMalloc(256 * sizeof(float));
float* x92 = x6+148928;
CUDA_CALL(hipMemcpy(x530, x92, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x533 = (float*)myGpuMalloc(256 * sizeof(float));
float* x93 = x6+5467584;
CUDA_CALL(hipMemcpy(x533, x93, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x536 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x94 = x6+8563136;
CUDA_CALL(hipMemcpy(x536, x94, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x539 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x95 = x6+19076544;
CUDA_CALL(hipMemcpy(x539, x95, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x542 = (float*)myGpuMalloc(128 * sizeof(float));
float* x96 = x6+816192;
CUDA_CALL(hipMemcpy(x542, x96, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x545 = (float*)myGpuMalloc(256 * sizeof(float));
float* x97 = x6+3818176;
CUDA_CALL(hipMemcpy(x545, x97, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x548 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x98 = x6+8299968;
CUDA_CALL(hipMemcpy(x548, x98, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x551 = (float*)myGpuMalloc(256 * sizeof(float));
float* x99 = x6+5468352;
CUDA_CALL(hipMemcpy(x551, x99, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x554 = (float*)myGpuMalloc(256 * sizeof(float));
float* x100 = x6+2170048;
CUDA_CALL(hipMemcpy(x554, x100, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x557 = (float*)myGpuMalloc(128 * sizeof(float));
float* x101 = x6+668352;
CUDA_CALL(hipMemcpy(x557, x101, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x560 = (float*)myGpuMalloc(512 * sizeof(float));
float* x102 = x6+468928;
CUDA_CALL(hipMemcpy(x560, x102, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x563 = (float*)myGpuMalloc(64 * sizeof(float));
float* x103 = x6+94848;
CUDA_CALL(hipMemcpy(x563, x103, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x566 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x104 = x6+23545280;
CUDA_CALL(hipMemcpy(x566, x104, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x569 = (float*)myGpuMalloc(256 * sizeof(float));
float* x105 = x6+7179456;
CUDA_CALL(hipMemcpy(x569, x105, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x572 = (float*)myGpuMalloc(64 * sizeof(float));
float* x106 = x6+43328;
CUDA_CALL(hipMemcpy(x572, x106, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x575 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x107 = x6+401856;
CUDA_CALL(hipMemcpy(x575, x107, 65536 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x578 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x108 = x6+14609856;
CUDA_CALL(hipMemcpy(x578, x108, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x581 = (float*)myGpuMalloc(256 * sizeof(float));
float* x109 = x6+2169280;
CUDA_CALL(hipMemcpy(x581, x109, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x584 = (float*)myGpuMalloc(256 * sizeof(float));
float* x110 = x6+7178944;
CUDA_CALL(hipMemcpy(x584, x110, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x587 = (float*)myGpuMalloc(64 * sizeof(float));
float* x111 = x6+1920;
CUDA_CALL(hipMemcpy(x587, x111, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x590 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x112 = x6+816576;
CUDA_CALL(hipMemcpy(x590, x112, 65536 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x593 = (float*)myGpuMalloc(128 * sizeof(float));
float* x113 = x6+949952;
CUDA_CALL(hipMemcpy(x593, x113, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x596 = (float*)myGpuMalloc(512 * sizeof(float));
float* x114 = x6+11452864;
CUDA_CALL(hipMemcpy(x596, x114, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x599 = (float*)myGpuMalloc(64 * sizeof(float));
float* x115 = x6+6208;
CUDA_CALL(hipMemcpy(x599, x115, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x602 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x116 = x6+12506560;
CUDA_CALL(hipMemcpy(x602, x116, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x605 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x117 = x6+4939200;
CUDA_CALL(hipMemcpy(x605, x117, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x608 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x118 = x6+2433472;
CUDA_CALL(hipMemcpy(x608, x118, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x611 = (float*)myGpuMalloc(64 * sizeof(float));
float* x119 = x6+203136;
CUDA_CALL(hipMemcpy(x611, x119, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x614 = (float*)myGpuMalloc(512 * sizeof(float));
float* x120 = x6+601024;
CUDA_CALL(hipMemcpy(x614, x120, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x617 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x121 = x6+7442880;
CUDA_CALL(hipMemcpy(x617, x121, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x620 = (float*)myGpuMalloc(512 * sizeof(float));
float* x122 = x6+9092032;
CUDA_CALL(hipMemcpy(x620, x122, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x623 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x123 = x6+8564160;
CUDA_CALL(hipMemcpy(x623, x123, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x626 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x124 = x6+23551424;
CUDA_CALL(hipMemcpy(x626, x124, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x629 = (float*)myGpuMalloc(256 * sizeof(float));
float* x125 = x6+4938688;
CUDA_CALL(hipMemcpy(x629, x125, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x632 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x126 = x6+14613952;
CUDA_CALL(hipMemcpy(x632, x126, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x635 = (float*)myGpuMalloc(256 * sizeof(float));
float* x127 = x6+60096;
CUDA_CALL(hipMemcpy(x635, x127, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x638 = (float*)myGpuMalloc(128 * sizeof(float));
float* x128 = x6+1097664;
CUDA_CALL(hipMemcpy(x638, x128, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x641 = (float*)myGpuMalloc(128 * sizeof(float));
float* x129 = x6+401600;
CUDA_CALL(hipMemcpy(x641, x129, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x644 = (float*)myGpuMalloc(256 * sizeof(float));
float* x130 = x6+4347328;
CUDA_CALL(hipMemcpy(x644, x130, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x647 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x131 = x6+132032;
CUDA_CALL(hipMemcpy(x647, x131, 16384 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x650 = (float*)myGpuMalloc(256 * sizeof(float));
float* x132 = x6+1578944;
CUDA_CALL(hipMemcpy(x650, x132, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x653 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x133 = x6+1165760;
CUDA_CALL(hipMemcpy(x653, x133, 65536 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x656 = (float*)myGpuMalloc(256 * sizeof(float));
float* x134 = x6+220352;
CUDA_CALL(hipMemcpy(x656, x134, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x659 = (float*)myGpuMalloc(128 * sizeof(float));
float* x135 = x6+253760;
CUDA_CALL(hipMemcpy(x659, x135, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x662 = (float*)myGpuMalloc(64 * sizeof(float));
float* x136 = x6+203008;
CUDA_CALL(hipMemcpy(x662, x136, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x665 = (float*)myGpuMalloc(256 * sizeof(float));
float* x137 = x6+6058688;
CUDA_CALL(hipMemcpy(x665, x137, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x668 = (float*)myGpuMalloc(512 * sizeof(float));
float* x138 = x6+15665088;
CUDA_CALL(hipMemcpy(x668, x138, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x671 = (float*)myGpuMalloc(512 * sizeof(float));
float* x139 = x6+18026944;
CUDA_CALL(hipMemcpy(x671, x139, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x674 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x140 = x6+8566208;
CUDA_CALL(hipMemcpy(x674, x140, 524288 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x677 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x141 = x6+5203392;
CUDA_CALL(hipMemcpy(x677, x141, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x680 = (float*)myGpuMalloc(256 * sizeof(float));
float* x142 = x6+8298944;
CUDA_CALL(hipMemcpy(x680, x142, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x683 = (float*)myGpuMalloc(64 * sizeof(float));
float* x143 = x6+94656;
CUDA_CALL(hipMemcpy(x683, x143, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x686 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x144 = x6+4084160;
CUDA_CALL(hipMemcpy(x686, x144, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x689 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x145 = x6+19078592;
CUDA_CALL(hipMemcpy(x689, x145, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x692 = (float*)myGpuMalloc(512 * sizeof(float));
float* x146 = x6+467392;
CUDA_CALL(hipMemcpy(x692, x146, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x695 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x147 = x6+6322624;
CUDA_CALL(hipMemcpy(x695, x147, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x698 = (float*)myGpuMalloc(512 * sizeof(float));
float* x148 = x6+883136;
CUDA_CALL(hipMemcpy(x698, x148, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x701 = (float*)myGpuMalloc(128 * sizeof(float));
float* x149 = x6+1379648;
CUDA_CALL(hipMemcpy(x701, x149, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x704 = (float*)myGpuMalloc(512 * sizeof(float));
float* x150 = x6+468416;
CUDA_CALL(hipMemcpy(x704, x150, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x707 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x151 = x6+149440;
CUDA_CALL(hipMemcpy(x707, x151, 16384 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x710 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x152 = x6+7445952;
CUDA_CALL(hipMemcpy(x710, x152, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x713 = (float*)myGpuMalloc(1728 * sizeof(float));
float* x153 = x6+0;
CUDA_CALL(hipMemcpy(x713, x153, 1728 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x716 = (float*)myGpuMalloc(64 * sizeof(float));
float* x154 = x6+131840;
CUDA_CALL(hipMemcpy(x716, x154, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x719 = (float*)myGpuMalloc(512 * sizeof(float));
float* x155 = x6+15665600;
CUDA_CALL(hipMemcpy(x719, x155, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x722 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x156 = x6+15666624;
CUDA_CALL(hipMemcpy(x722, x156, 2359296 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x725 = (float*)myGpuMalloc(512 * sizeof(float));
float* x157 = x6+1445312;
CUDA_CALL(hipMemcpy(x725, x157, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x728 = (float*)myGpuMalloc(256 * sizeof(float));
float* x158 = x6+3227840;
CUDA_CALL(hipMemcpy(x728, x158, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x731 = (float*)myGpuMalloc(64 * sizeof(float));
float* x159 = x6+43392;
CUDA_CALL(hipMemcpy(x731, x159, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x734 = (float*)myGpuMalloc(512 * sizeof(float));
float* x160 = x6+11452352;
CUDA_CALL(hipMemcpy(x734, x160, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x737 = (float*)myGpuMalloc(512 * sizeof(float));
float* x161 = x6+18025920;
CUDA_CALL(hipMemcpy(x737, x161, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x740 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x162 = x6+6324672;
CUDA_CALL(hipMemcpy(x740, x162, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x743 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x163 = x6+60864;
CUDA_CALL(hipMemcpy(x743, x163, 16384 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x746 = (float*)myGpuMalloc(256 * sizeof(float));
float* x164 = x6+5468096;
CUDA_CALL(hipMemcpy(x746, x164, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x749 = (float*)myGpuMalloc(64 * sizeof(float));
float* x165 = x6+43200;
CUDA_CALL(hipMemcpy(x749, x165, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x752 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x166 = x6+1231808;
CUDA_CALL(hipMemcpy(x752, x166, 147456 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x755 = (float*)myGpuMalloc(256 * sizeof(float));
float* x167 = x6+149184;
CUDA_CALL(hipMemcpy(x755, x167, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x758 = (float*)myGpuMalloc(512 * sizeof(float));
float* x168 = x6+1163712;
CUDA_CALL(hipMemcpy(x758, x168, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x761 = (float*)myGpuMalloc(256 * sizeof(float));
float* x169 = x6+7178688;
CUDA_CALL(hipMemcpy(x761, x169, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x764 = (float*)myGpuMalloc(512 * sizeof(float));
float* x170 = x6+22495168;
CUDA_CALL(hipMemcpy(x764, x170, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x767 = (float*)myGpuMalloc(128 * sizeof(float));
float* x171 = x6+949824;
CUDA_CALL(hipMemcpy(x767, x171, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x770 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x172 = x6+78272;
CUDA_CALL(hipMemcpy(x770, x172, 16384 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x773 = (float*)myGpuMalloc(128 * sizeof(float));
float* x173 = x6+253504;
CUDA_CALL(hipMemcpy(x773, x173, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x776 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x174 = x6+14607808;
CUDA_CALL(hipMemcpy(x776, x174, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x779 = (float*)myGpuMalloc(256 * sizeof(float));
float* x175 = x6+4348096;
CUDA_CALL(hipMemcpy(x779, x175, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x782 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x176 = x6+1579456;
CUDA_CALL(hipMemcpy(x782, x176, 589824 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x785 = (float*)myGpuMalloc(256 * sizeof(float));
float* x177 = x6+7708864;
CUDA_CALL(hipMemcpy(x785, x177, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x788 = (float*)myGpuMalloc(128 * sizeof(float));
float* x178 = x6+668480;
CUDA_CALL(hipMemcpy(x788, x178, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x791 = (float*)myGpuMalloc(256 * sizeof(float));
float* x179 = x6+4347840;
CUDA_CALL(hipMemcpy(x791, x179, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x794 = (float*)myGpuMalloc(64 * sizeof(float));
float* x180 = x6+203072;
CUDA_CALL(hipMemcpy(x794, x180, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x797 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x181 = x6+1447360;
CUDA_CALL(hipMemcpy(x797, x181, 131072 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x800 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x182 = x6+23547328;
CUDA_CALL(hipMemcpy(x800, x182, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x803 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x183 = x6+4083136;
CUDA_CALL(hipMemcpy(x803, x183, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x806 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x184 = x6+8565184;
CUDA_CALL(hipMemcpy(x806, x184, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x809 = (float*)myGpuMalloc(256 * sizeof(float));
float* x185 = x6+220096;
CUDA_CALL(hipMemcpy(x809, x185, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x812 = (float*)myGpuMalloc(256 * sizeof(float));
float* x186 = x6+6588096;
CUDA_CALL(hipMemcpy(x812, x186, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x815 = (float*)myGpuMalloc(256 * sizeof(float));
float* x187 = x6+6058944;
CUDA_CALL(hipMemcpy(x815, x187, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x818 = (float*)myGpuMalloc(64 * sizeof(float));
float* x188 = x6+166016;
CUDA_CALL(hipMemcpy(x818, x188, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x821 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x189 = x6+5204416;
CUDA_CALL(hipMemcpy(x821, x189, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x824 = (float*)myGpuMalloc(256 * sizeof(float));
float* x190 = x6+8299200;
CUDA_CALL(hipMemcpy(x824, x190, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x827 = (float*)myGpuMalloc(128 * sizeof(float));
float* x191 = x6+401472;
CUDA_CALL(hipMemcpy(x827, x191, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x830 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x192 = x6+950208;
CUDA_CALL(hipMemcpy(x830, x192, 147456 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x833 = (float*)myGpuMalloc(256 * sizeof(float));
float* x193 = x6+4938432;
CUDA_CALL(hipMemcpy(x833, x193, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x836 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x194 = x6+12508608;
CUDA_CALL(hipMemcpy(x836, x194, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x839 = (float*)myGpuMalloc(512 * sizeof(float));
float* x195 = x6+22494656;
CUDA_CALL(hipMemcpy(x839, x195, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x842 = (float*)myGpuMalloc(512 * sizeof(float));
float* x196 = x6+18027456;
CUDA_CALL(hipMemcpy(x842, x196, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x845 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x197 = x6+884160;
CUDA_CALL(hipMemcpy(x845, x197, 65536 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x848 = (float*)myGpuMalloc(256 * sizeof(float));
float* x198 = x6+4347584;
CUDA_CALL(hipMemcpy(x848, x198, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x851 = (float*)myGpuMalloc(256 * sizeof(float));
float* x199 = x6+1579200;
CUDA_CALL(hipMemcpy(x851, x199, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x854 = (float*)myGpuMalloc(256 * sizeof(float));
float* x200 = x6+59840;
CUDA_CALL(hipMemcpy(x854, x200, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x857 = (float*)myGpuMalloc(256 * sizeof(float));
float* x201 = x6+3818432;
CUDA_CALL(hipMemcpy(x857, x201, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x860 = (float*)myGpuMalloc(512 * sizeof(float));
float* x202 = x6+9090496;
CUDA_CALL(hipMemcpy(x860, x202, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x863 = (float*)myGpuMalloc(512 * sizeof(float));
float* x203 = x6+22496192;
CUDA_CALL(hipMemcpy(x863, x203, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x866 = (float*)myGpuMalloc(256 * sizeof(float));
float* x204 = x6+77504;
CUDA_CALL(hipMemcpy(x866, x204, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x869 = (float*)myGpuMalloc(128 * sizeof(float));
float* x205 = x6+253632;
CUDA_CALL(hipMemcpy(x869, x205, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x872 = (float*)myGpuMalloc(512 * sizeof(float));
float* x206 = x6+11451840;
CUDA_CALL(hipMemcpy(x872, x206, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x875 = (float*)myGpuMalloc(64 * sizeof(float));
float* x207 = x6+1728;
CUDA_CALL(hipMemcpy(x875, x207, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x878 = (float*)myGpuMalloc(512 * sizeof(float));
float* x208 = x6+600512;
CUDA_CALL(hipMemcpy(x878, x208, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x881 = (float*)myGpuMalloc(64 * sizeof(float));
float* x209 = x6+131776;
CUDA_CALL(hipMemcpy(x881, x209, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x884 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x210 = x6+7443904;
CUDA_CALL(hipMemcpy(x884, x210, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x887 = (float*)myGpuMalloc(512 * sizeof(float));
float* x211 = x6+467904;
CUDA_CALL(hipMemcpy(x887, x211, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x890 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x212 = x6+2963904;
CUDA_CALL(hipMemcpy(x890, x212, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x893 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x213 = x6+11453888;
CUDA_CALL(hipMemcpy(x893, x213, 1048576 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x896 = (float*)myGpuMalloc(512 * sizeof(float));
float* x214 = x6+20134336;
CUDA_CALL(hipMemcpy(x896, x214, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x899 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x215 = x6+12510656;
CUDA_CALL(hipMemcpy(x899, x215, 2097152 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x902 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x216 = x6+14616000;
CUDA_CALL(hipMemcpy(x902, x216, 1048576 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x905 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x217 = x6+2434496;
CUDA_CALL(hipMemcpy(x905, x217, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x908 = (float*)myGpuMalloc(128 * sizeof(float));
float* x218 = x6+1097920;
CUDA_CALL(hipMemcpy(x908, x218, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x911 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x219 = x6+4085184;
CUDA_CALL(hipMemcpy(x911, x219, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x914 = (float*)myGpuMalloc(256 * sizeof(float));
float* x220 = x6+3227328;
CUDA_CALL(hipMemcpy(x914, x220, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x917 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x221 = x6+2961856;
CUDA_CALL(hipMemcpy(x917, x221, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x920 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x222 = x6+7179712;
CUDA_CALL(hipMemcpy(x920, x222, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x923 = (float*)myGpuMalloc(128 * sizeof(float));
float* x223 = x6+668096;
CUDA_CALL(hipMemcpy(x923, x223, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x926 = (float*)myGpuMalloc(512 * sizeof(float));
float* x224 = x6+1165248;
CUDA_CALL(hipMemcpy(x926, x224, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x929 = (float*)myGpuMalloc(512 * sizeof(float));
float* x225 = x6+9091008;
CUDA_CALL(hipMemcpy(x929, x225, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x932 = (float*)myGpuMalloc(128 * sizeof(float));
float* x226 = x6+816448;
CUDA_CALL(hipMemcpy(x932, x226, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x935 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x227 = x6+7709120;
CUDA_CALL(hipMemcpy(x935, x227, 589824 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x938 = (float*)myGpuMalloc(20480 * sizeof(float));
float* x228 = x6+23553472;
CUDA_CALL(hipMemcpy(x938, x228, 20480 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x941 = (float*)myGpuMalloc(256 * sizeof(float));
float* x229 = x6+4938176;
CUDA_CALL(hipMemcpy(x941, x229, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x944 = (float*)myGpuMalloc(256 * sizeof(float));
float* x230 = x6+2169792;
CUDA_CALL(hipMemcpy(x944, x230, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x947 = (float*)myGpuMalloc(256 * sizeof(float));
float* x231 = x6+6059200;
CUDA_CALL(hipMemcpy(x947, x231, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x950 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x232 = x6+6323648;
CUDA_CALL(hipMemcpy(x950, x232, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x953 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x233 = x6+4082112;
CUDA_CALL(hipMemcpy(x953, x233, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x956 = (float*)myGpuMalloc(4096 * sizeof(float));
float* x234 = x6+1984;
CUDA_CALL(hipMemcpy(x956, x234, 4096 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x959 = (float*)myGpuMalloc(512 * sizeof(float));
float* x235 = x6+1446848;
CUDA_CALL(hipMemcpy(x959, x235, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x962 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x236 = x6+668608;
CUDA_CALL(hipMemcpy(x962, x236, 147456 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x965 = (float*)myGpuMalloc(128 * sizeof(float));
float* x237 = x6+1231552;
CUDA_CALL(hipMemcpy(x965, x237, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x968 = (float*)myGpuMalloc(256 * sizeof(float));
float* x238 = x6+3818688;
CUDA_CALL(hipMemcpy(x968, x238, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x971 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x239 = x6+6321600;
CUDA_CALL(hipMemcpy(x971, x239, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x974 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x240 = x6+12502464;
CUDA_CALL(hipMemcpy(x974, x240, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x977 = (float*)myGpuMalloc(256 * sizeof(float));
float* x241 = x6+8299712;
CUDA_CALL(hipMemcpy(x977, x241, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x980 = (float*)myGpuMalloc(256 * sizeof(float));
float* x242 = x6+5467840;
CUDA_CALL(hipMemcpy(x980, x242, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x983 = (float*)myGpuMalloc(128 * sizeof(float));
float* x243 = x6+1231424;
CUDA_CALL(hipMemcpy(x983, x243, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x986 = (float*)myGpuMalloc(256 * sizeof(float));
float* x244 = x6+78016;
CUDA_CALL(hipMemcpy(x986, x244, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x989 = (float*)myGpuMalloc(64 * sizeof(float));
float* x245 = x6+131968;
CUDA_CALL(hipMemcpy(x989, x245, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x992 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x246 = x6+19082688;
CUDA_CALL(hipMemcpy(x992, x246, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x995 = (float*)myGpuMalloc(512 * sizeof(float));
float* x247 = x6+882624;
CUDA_CALL(hipMemcpy(x995, x247, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x998 = (float*)myGpuMalloc(256 * sizeof(float));
float* x248 = x6+219840;
CUDA_CALL(hipMemcpy(x998, x248, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1001 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x249 = x6+8562112;
CUDA_CALL(hipMemcpy(x1001, x249, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1004 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x250 = x6+5468608;
CUDA_CALL(hipMemcpy(x1004, x250, 589824 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1007 = (float*)myGpuMalloc(256 * sizeof(float));
float* x251 = x6+7179200;
CUDA_CALL(hipMemcpy(x1007, x251, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1010 = (float*)myGpuMalloc(64 * sizeof(float));
float* x252 = x6+1792;
CUDA_CALL(hipMemcpy(x1010, x252, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1013 = (float*)myGpuMalloc(128 * sizeof(float));
float* x253 = x6+401344;
CUDA_CALL(hipMemcpy(x1013, x253, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1016 = (float*)myGpuMalloc(256 * sizeof(float));
float* x254 = x6+7708352;
CUDA_CALL(hipMemcpy(x1016, x254, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1019 = (float*)myGpuMalloc(256 * sizeof(float));
float* x255 = x6+6588352;
CUDA_CALL(hipMemcpy(x1019, x255, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1022 = (float*)myGpuMalloc(512 * sizeof(float));
float* x256 = x6+20134848;
CUDA_CALL(hipMemcpy(x1022, x256, 512 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1025 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x257 = x6+602560;
CUDA_CALL(hipMemcpy(x1025, x257, 65536 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1028 = (float*)myGpuMalloc(64 * sizeof(float));
float* x258 = x6+165952;
CUDA_CALL(hipMemcpy(x1028, x258, 64 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1031 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x259 = x6+469440;
CUDA_CALL(hipMemcpy(x1031, x259, 131072 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1034 = (float*)myGpuMalloc(256 * sizeof(float));
float* x260 = x6+3227584;
CUDA_CALL(hipMemcpy(x1034, x260, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1037 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x261 = x6+23549376;
CUDA_CALL(hipMemcpy(x1037, x261, 2048 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1040 = (float*)myGpuMalloc(128 * sizeof(float));
float* x262 = x6+1231680;
CUDA_CALL(hipMemcpy(x1040, x262, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1043 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x263 = x6+6588864;
CUDA_CALL(hipMemcpy(x1043, x263, 589824 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1046 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x264 = x6+5201344;
CUDA_CALL(hipMemcpy(x1046, x264, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1049 = (float*)myGpuMalloc(256 * sizeof(float));
float* x265 = x6+77760;
CUDA_CALL(hipMemcpy(x1049, x265, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1052 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x266 = x6+19084736;
CUDA_CALL(hipMemcpy(x1052, x266, 1048576 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1055 = (float*)myGpuMalloc(128 * sizeof(float));
float* x267 = x6+1098048;
CUDA_CALL(hipMemcpy(x1055, x267, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1058 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x268 = x6+2435520;
CUDA_CALL(hipMemcpy(x1058, x268, 1024 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1061 = (float*)myGpuMalloc(128 * sizeof(float));
float* x269 = x6+1379520;
CUDA_CALL(hipMemcpy(x1061, x269, 128 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1064 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x270 = x6+2170304;
CUDA_CALL(hipMemcpy(x1064, x270, 262144 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1067 = (float*)myGpuMalloc(256 * sizeof(float));
float* x271 = x6+1578432;
CUDA_CALL(hipMemcpy(x1067, x271, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1070 = (float*)myGpuMalloc(256 * sizeof(float));
float* x272 = x6+3817920;
CUDA_CALL(hipMemcpy(x1070, x272, 256 * sizeof(float), hipMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1073 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x273 = x6+7444928;
CUDA_CALL(hipMemcpy(x1073, x273, 1024 * sizeof(float), hipMemcpyHostToDevice));
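// Load CIFAR-10 training batch 1 from its binary format: each record is 3073 bytes,
// a single label byte followed by 3072 bytes of 3x32x32 pixel data. The file is mmap'd,
// labels are collected into x1083, and pixel bytes are scaled to [0,1] floats in x1082.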
int32_t x1075 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0);
int32_t x1076 = fsize(x1075);
int64_t x1078 = (int64_t)x1076;
int64_t x1079 = x1078 / 3073LL;
int32_t x1080 = (int32_t)x1079;
int32_t x1081 = x1080 * 3072;
float* x1082 = (float*)myMalloc(x1081 * sizeof(float));;
int* x1083 = (int32_t*)myMalloc(x1080 * sizeof(int32_t));;
char* x1077 = (char*)mmap(0, x1076, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x1075, 0);
for(int x1085=0; x1085 < x1080; x1085++) {
int32_t x1086 = x1085 * 3073;
char x1087 = x1077[x1086];
int32_t x1088 = (int32_t)(unsigned char)x1087;
x1083[x1085] = x1088;
int32_t x1094 = x1086 + 1;
int32_t x1092 = x1085 * 3072;
for(int x1091=0; x1091 < 3072; x1091++) {
int32_t x1095 = x1094 + x1091;
char x1096 = x1077[x1095];
int32_t x1093 = x1092 + x1091;
float x1097 = (float)(unsigned char)x1096;
float x1098 = x1097 / 255.0f;
x1082[x1093] = x1098;
}
}
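// Process the images in mini-batches of 64 (x1104 full batches). For each batch the host
// prints the max absolute pixel value and the first 10 values as a sanity check before
// copying the 64 x 3 x 32 x 32 block to the GPU and running the forward pass.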
int32_t x1104 = x1080 / 64;
for(int x1106=0; x1106 < x1104; x1106++) {
int32_t x1107 = x1106 * 64;
int32_t x1108 = x1107 * 3072;
float* x1109 = x1082+x1108;
int* x1110 = x1083+x1107;
printf("input (size 64 x 3 x 32 x 32)\n");
float x1112 = 0.0f;
for(int x1114=0; x1114 < 196608; x1114++) {
float x1115 = x1112;
float x1116 = x1109[x1114];
float x1117 = fabs(x1116);
float x1118 = fabs(x1115);
bool x1119 = x1117 > x1118;
float x1120;
if (x1119) {
x1120 = x1116;
} else {
x1120 = x1115;
}
x1112 = x1120;
}
float x1124 = x1112;
printf("Max Abs: %.5f || ",x1124);
for(int x1127=0; x1127 < 10; x1127++) {
float x1128 = x1109[x1127];
printf("%.5f ",x1128);
}
printf("\n");
// Tensor 'toGPU' invocation.
float* x1134 = (float*)myGpuMalloc(196608 * sizeof(float));
CUDA_CALL(hipMemcpy(x1134, x1109, 196608 * sizeof(float), hipMemcpyHostToDevice));
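// First convolution: 3x3 filters (x713), 3 -> 64 channels, pad 1, stride 1, mapping the
// 64 x 3 x 32 x 32 batch to a 64 x 64 x 32 x 32 output in x1136. As with every convolution
// below, cuDNN selects the fastest forward algorithm and the workspace it requests is
// taken from the GPU allocator.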
float* x1136 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1137 = (float*)myMalloc(1 * sizeof(float));;
x1137[0] = 0.0f;
float* x1139 = (float*)myMalloc(1 * sizeof(float));;
x1139[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 3, 32, 32));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 3, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1139, in_desc, x1134, filt_desc, x713,
conv_desc, algo, ws_data, ws_size,
x1137, out_desc, x1136));
};
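// Batch normalization (inference mode) over the 64 channels of the conv output, using the
// per-channel scale/bias and running mean/variance tensors loaded above, epsilon 1e-5.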
float* x1142 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1143 = (float*)myMalloc(1 * sizeof(float));;
x1143[0] = 0.0f;
float* x1145 = (float*)myMalloc(1 * sizeof(float));;
x1145[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1145, x1145, in_desc, x1136, out_desc, x1142, sbmv_desc, x875,
x1010, x377, x587, 1.0E-5));
};
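// ReLU activation on the normalized feature map, written to a fresh buffer x1152.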
float* x1148 = (float*)myMalloc(1 * sizeof(float));;
x1148[0] = 0.0f;
float* x1150 = (float*)myMalloc(1 * sizeof(float));;
x1150[0] = 1.0f;
float* x1152 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1150, x_desc, x1142, x1148, x_desc, x1152));
};
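// 2x2 max pooling with stride 2 and no padding: the 32x32 feature maps shrink to 16x16
// (output x1158, 64 x 64 x 16 x 16).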
float* x1154 = (float*)myMalloc(1 * sizeof(float));;
x1154[0] = 0.0f;
float* x1156 = (float*)myMalloc(1 * sizeof(float));;
x1156[0] = 1.0f;
float* x1158 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN,
    2, 2, 0, 0, 2, 2
));
CUDNN_CALL(cudnnPoolingForward(
cudnnHandle,
poolingDesc,
x1156, in_desc, x1152, x1154, out_desc, x1158));
};
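// What follows appears to be the first ResNet-style bottleneck block on the pooled
// features: a 1x1 convolution (64 -> 64 channels, filter x956) with batch norm and ReLU,
// then a 3x3 convolution, then a 1x1 expansion to 256 channels, with a projected
// shortcut added at the end.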
float* x1160 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1161 = (float*)myMalloc(1 * sizeof(float));;
x1161[0] = 0.0f;
float* x1163 = (float*)myMalloc(1 * sizeof(float));;
x1163[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1163, in_desc, x1158, filt_desc, x956,
conv_desc, algo, ws_data, ws_size,
x1161, out_desc, x1160));
};
float* x1166 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1167 = (float*)myMalloc(1 * sizeof(float));;
x1167[0] = 0.0f;
float* x1169 = (float*)myMalloc(1 * sizeof(float));;
x1169[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1169, x1169, in_desc, x1160, out_desc, x1166, sbmv_desc, x335,
x416, x599, x410, 1.0E-5));
};
float* x1172 = (float*)myMalloc(1 * sizeof(float));;
x1172[0] = 0.0f;
float* x1174 = (float*)myMalloc(1 * sizeof(float));;
x1174[0] = 1.0f;
float* x1176 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1174, x_desc, x1166, x1172, x_desc, x1176));
};
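// Middle of the bottleneck: 3x3 convolution (64 -> 64 channels, filter x527, pad 1),
// again followed by batch norm and ReLU.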
float* x1178 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1179 = (float*)myMalloc(1 * sizeof(float));;
x1179[0] = 0.0f;
float* x1181 = (float*)myMalloc(1 * sizeof(float));;
x1181[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1181, in_desc, x1176, filt_desc, x527,
conv_desc, algo, ws_data, ws_size,
x1179, out_desc, x1178));
};
float* x1184 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1185 = (float*)myMalloc(1 * sizeof(float));;
x1185[0] = 0.0f;
float* x1187 = (float*)myMalloc(1 * sizeof(float));;
x1187[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1187, x1187, in_desc, x1178, out_desc, x1184, sbmv_desc, x749,
x404, x572, x731, 1.0E-5));
};
float* x1190 = (float*)myMalloc(1 * sizeof(float));;
x1190[0] = 0.0f;
float* x1192 = (float*)myMalloc(1 * sizeof(float));;
x1192[0] = 1.0f;
float* x1194 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1192, x_desc, x1184, x1190, x_desc, x1194));
};
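// Expansion stage of the bottleneck: 1x1 convolution from 64 to 256 channels (filter x353),
// producing a 64 x 256 x 16 x 16 tensor, then batch norm.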
float* x1196 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1197 = (float*)myMalloc(1 * sizeof(float));;
x1197[0] = 0.0f;
float* x1199 = (float*)myMalloc(1 * sizeof(float));;
x1199[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1199, in_desc, x1194, filt_desc, x353,
conv_desc, algo, ws_data, ws_size,
x1197, out_desc, x1196));
};
float* x1202 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1203 = (float*)myMalloc(1 * sizeof(float));;
x1203[0] = 0.0f;
float* x1205 = (float*)myMalloc(1 * sizeof(float));;
x1205[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1205, x1205, in_desc, x1196, out_desc, x1202, sbmv_desc, x854,
x635, x470, x365, 1.0E-5));
};
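// Projection shortcut: a parallel 1x1 convolution (filter x743, 64 -> 256 channels)
// applied directly to the pooled input x1158, with its own batch norm, so the shortcut
// matches the 256-channel shape of the main branch.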
float* x1208 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1209 = (float*)myMalloc(1 * sizeof(float));;
x1209[0] = 0.0f;
float* x1211 = (float*)myMalloc(1 * sizeof(float));;
x1211[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1211, in_desc, x1158, filt_desc, x743,
conv_desc, algo, ws_data, ws_size,
x1209, out_desc, x1208));
};
float* x1214 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1215 = (float*)myMalloc(1 * sizeof(float));;
x1215[0] = 0.0f;
float* x1217 = (float*)myMalloc(1 * sizeof(float));;
x1217[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1217, x1217, in_desc, x1208, out_desc, x1214, sbmv_desc, x485,
x866, x1049, x986, 1.0E-5));
};
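// Residual addition: cudnnAddTensor accumulates the projected shortcut (x1214) into the
// main-branch output (x1202) in place, and the sum is passed through ReLU into x1229.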
float* x1220 = (float*)myMalloc(1 * sizeof(float));;
x1220[0] = 1.0f;
float* x1222 = (float*)myMalloc(1 * sizeof(float));;
x1222[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1220, bias_desc, x1214, x1222, out_desc, x1202));
};
float* x1225 = (float*)myMalloc(1 * sizeof(float));;
x1225[0] = 0.0f;
float* x1227 = (float*)myMalloc(1 * sizeof(float));;
x1227[0] = 1.0f;
float* x1229 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1227, x_desc, x1202, x1225, x_desc, x1229));
};
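// Second bottleneck block, this time with an identity shortcut: a 1x1 convolution reduces
// 256 -> 64 channels (filter x770), followed by the same 3x3 and 1x1-expand pattern.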
float* x1231 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1232 = (float*)myMalloc(1 * sizeof(float));;
x1232[0] = 0.0f;
float* x1234 = (float*)myMalloc(1 * sizeof(float));;
x1234[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1234, in_desc, x1229, filt_desc, x770,
conv_desc, algo, ws_data, ws_size,
x1232, out_desc, x1231));
};
float* x1237 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1238 = (float*)myMalloc(1 * sizeof(float));;
x1238[0] = 0.0f;
float* x1240 = (float*)myMalloc(1 * sizeof(float));;
x1240[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1240, x1240, in_desc, x1231, out_desc, x1237, sbmv_desc, x683,
x437, x287, x563, 1.0E-5));
};
float* x1243 = (float*)myMalloc(1 * sizeof(float));;
x1243[0] = 0.0f;
float* x1245 = (float*)myMalloc(1 * sizeof(float));;
x1245[0] = 1.0f;
float* x1247 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1245, x_desc, x1237, x1243, x_desc, x1247));
};
float* x1249 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1250 = (float*)myMalloc(1 * sizeof(float));;
x1250[0] = 0.0f;
float* x1252 = (float*)myMalloc(1 * sizeof(float));;
x1252[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1252, in_desc, x1247, filt_desc, x506,
conv_desc, algo, ws_data, ws_size,
x1250, out_desc, x1249));
};
float* x1255 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1256 = (float*)myMalloc(1 * sizeof(float));;
x1256[0] = 0.0f;
float* x1258 = (float*)myMalloc(1 * sizeof(float));;
x1258[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1258, x1258, in_desc, x1249, out_desc, x1255, sbmv_desc, x881,
x716, x389, x989, 1.0E-5));
};
float* x1261 = (float*)myMalloc(1 * sizeof(float));;
x1261[0] = 0.0f;
float* x1263 = (float*)myMalloc(1 * sizeof(float));;
x1263[0] = 1.0f;
float* x1265 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1263, x_desc, x1255, x1261, x_desc, x1265));
};
float* x1267 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1268 = (float*)myMalloc(1 * sizeof(float));;
x1268[0] = 0.0f;
float* x1270 = (float*)myMalloc(1 * sizeof(float));;
x1270[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1270, in_desc, x1265, filt_desc, x647,
conv_desc, algo, ws_data, ws_size,
x1268, out_desc, x1267));
};
float* x1273 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1274 = (float*)myMalloc(1 * sizeof(float));;
x1274[0] = 0.0f;
float* x1276 = (float*)myMalloc(1 * sizeof(float));;
x1276[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1276, x1276, in_desc, x1267, out_desc, x1273, sbmv_desc, x431,
x278, x530, x755, 1.0E-5));
};
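// Identity shortcut: the block input x1229 is added to the normalized output x1273;
// no projection is needed since both already have 256 channels.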
float* x1279 = (float*)myMalloc(1 * sizeof(float));;
x1279[0] = 1.0f;
float* x1281 = (float*)myMalloc(1 * sizeof(float));;
x1281[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1279, bias_desc, x1229, x1281, out_desc, x1273));
};
float* x1284 = (float*)myMalloc(1 * sizeof(float));;
x1284[0] = 0.0f;
float* x1286 = (float*)myMalloc(1 * sizeof(float));;
x1286[0] = 1.0f;
float* x1288 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1286, x_desc, x1273, x1284, x_desc, x1288));
};
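// Third bottleneck block (identity shortcut), repeating the 1x1 -> 3x3 -> 1x1 pattern
// starting from x1288 with the 1x1 reduction filter x707.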
float* x1290 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1291 = (float*)myMalloc(1 * sizeof(float));;
x1291[0] = 0.0f;
float* x1293 = (float*)myMalloc(1 * sizeof(float));;
x1293[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1293, in_desc, x1288, filt_desc, x707,
conv_desc, algo, ws_data, ws_size,
x1291, out_desc, x1290));
};
float* x1296 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1297 = (float*)myMalloc(1 * sizeof(float));;
x1297[0] = 0.0f;
float* x1299 = (float*)myMalloc(1 * sizeof(float));;
x1299[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1299, x1299, in_desc, x1290, out_desc, x1296, sbmv_desc, x500,
x329, x1028, x818, 1.0E-5));
};
float* x1302 = (float*)myMalloc(1 * sizeof(float));;
x1302[0] = 0.0f;
float* x1304 = (float*)myMalloc(1 * sizeof(float));;
x1304[0] = 1.0f;
float* x1306 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1304, x_desc, x1296, x1302, x_desc, x1306));
};
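// Middle stage of the bottleneck: 3x3 convolution, 64 -> 64 channels, padding 1,
// stride 1 (x1306 * x476 -> x1308), again followed by batch norm and ReLU.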
float* x1308 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1309 = (float*)myMalloc(1 * sizeof(float));;
x1309[0] = 0.0f;
float* x1311 = (float*)myMalloc(1 * sizeof(float));;
x1311[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1311, in_desc, x1306, filt_desc, x476,
conv_desc, algo, ws_data, ws_size,
x1309, out_desc, x1308));
};
float* x1314 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1315 = (float*)myMalloc(1 * sizeof(float));;
x1315[0] = 0.0f;
float* x1317 = (float*)myMalloc(1 * sizeof(float));;
x1317[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1317, x1317, in_desc, x1308, out_desc, x1314, sbmv_desc, x473,
x662, x794, x611, 1.0E-5));
};
float* x1320 = (float*)myMalloc(1 * sizeof(float));;
x1320[0] = 0.0f;
float* x1322 = (float*)myMalloc(1 * sizeof(float));;
x1322[0] = 1.0f;
float* x1324 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1322, x_desc, x1314, x1320, x_desc, x1324));
};
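// Final stage of the bottleneck: 1x1 convolution expanding 64 -> 256 channels
// (x1324 * x518 -> x1326), followed by batch norm into x1332.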
float* x1326 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1327 = (float*)myMalloc(1 * sizeof(float));;
x1327[0] = 0.0f;
float* x1329 = (float*)myMalloc(1 * sizeof(float));;
x1329[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1329, in_desc, x1324, filt_desc, x518,
conv_desc, algo, ws_data, ws_size,
x1327, out_desc, x1326));
};
float* x1332 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1333 = (float*)myMalloc(1 * sizeof(float));;
x1333[0] = 0.0f;
float* x1335 = (float*)myMalloc(1 * sizeof(float));;
x1335[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1335, x1335, in_desc, x1326, out_desc, x1332, sbmv_desc, x368,
x998, x809, x656, 1.0E-5));
};
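// Residual add: the block input x1288 is accumulated into the batch-norm output
// x1332 (alpha = beta = 1.0f), and the ReLU below produces the block output x1347.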
float* x1338 = (float*)myMalloc(1 * sizeof(float));;
x1338[0] = 1.0f;
float* x1340 = (float*)myMalloc(1 * sizeof(float));;
x1340[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1338, bias_desc, x1288, x1340, out_desc, x1332));
};
float* x1343 = (float*)myMalloc(1 * sizeof(float));;
x1343[0] = 0.0f;
float* x1345 = (float*)myMalloc(1 * sizeof(float));;
x1345[0] = 1.0f;
float* x1347 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1345, x_desc, x1332, x1343, x_desc, x1347));
};
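// Next bottleneck unit, which downsamples: a 1x1 convolution reduces 256 -> 128
// channels at 16x16 (x1347 * x290 -> x1349); the strided 3x3 convolution further
// below halves the spatial size to 8x8.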
float* x1349 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1350 = (float*)myMalloc(1 * sizeof(float));;
x1350[0] = 0.0f;
float* x1352 = (float*)myMalloc(1 * sizeof(float));;
x1352[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1352, in_desc, x1347, filt_desc, x290,
conv_desc, algo, ws_data, ws_size,
x1350, out_desc, x1349));
};
float* x1355 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1356 = (float*)myMalloc(1 * sizeof(float));;
x1356[0] = 0.0f;
float* x1358 = (float*)myMalloc(1 * sizeof(float));;
x1358[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1358, x1358, in_desc, x1349, out_desc, x1355, sbmv_desc, x509,
x773, x869, x659, 1.0E-5));
};
float* x1361 = (float*)myMalloc(1 * sizeof(float));;
x1361[0] = 0.0f;
float* x1363 = (float*)myMalloc(1 * sizeof(float));;
x1363[0] = 1.0f;
float* x1365 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1363, x_desc, x1355, x1361, x_desc, x1365));
};
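// Strided 3x3 convolution: 128 -> 128 channels, padding 1, stride 2, shrinking the
// feature map from 16x16 to 8x8 (x1365 * x338 -> x1367).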
float* x1367 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1368 = (float*)myMalloc(1 * sizeof(float));;
x1368[0] = 0.0f;
float* x1370 = (float*)myMalloc(1 * sizeof(float));;
x1370[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1370, in_desc, x1365, filt_desc, x338,
conv_desc, algo, ws_data, ws_size,
x1368, out_desc, x1367));
};
float* x1373 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1374 = (float*)myMalloc(1 * sizeof(float));;
x1374[0] = 0.0f;
float* x1376 = (float*)myMalloc(1 * sizeof(float));;
x1376[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1376, x1376, in_desc, x1367, out_desc, x1373, sbmv_desc, x1013,
x827, x641, x386, 1.0E-5));
};
float* x1379 = (float*)myMalloc(1 * sizeof(float));;
x1379[0] = 0.0f;
float* x1381 = (float*)myMalloc(1 * sizeof(float));;
x1381[0] = 1.0f;
float* x1383 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1381, x_desc, x1373, x1379, x_desc, x1383));
};
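// 1x1 convolution expanding 128 -> 512 channels at 8x8 (x1383 * x575 -> x1385),
// followed by batch norm into x1391.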
float* x1385 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1386 = (float*)myMalloc(1 * sizeof(float));;
x1386[0] = 0.0f;
float* x1388 = (float*)myMalloc(1 * sizeof(float));;
x1388[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1388, in_desc, x1383, filt_desc, x575,
conv_desc, algo, ws_data, ws_size,
x1386, out_desc, x1385));
};
float* x1391 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1392 = (float*)myMalloc(1 * sizeof(float));;
x1392[0] = 0.0f;
float* x1394 = (float*)myMalloc(1 * sizeof(float));;
x1394[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1394, x1394, in_desc, x1385, out_desc, x1391, sbmv_desc, x692,
x887, x704, x560, 1.0E-5));
};
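// Projection shortcut for the downsampling unit: a strided 1x1 convolution maps the
// block input x1347 (64x256x16x16) to 64x512x8x8 (filter x1031 -> x1397) so the
// shortcut matches the main path, followed by its own batch norm into x1403.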
float* x1397 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1398 = (float*)myMalloc(1 * sizeof(float));;
x1398[0] = 0.0f;
float* x1400 = (float*)myMalloc(1 * sizeof(float));;
x1400[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1400, in_desc, x1347, filt_desc, x1031,
conv_desc, algo, ws_data, ws_size,
x1398, out_desc, x1397));
};
float* x1403 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1404 = (float*)myMalloc(1 * sizeof(float));;
x1404[0] = 0.0f;
float* x1406 = (float*)myMalloc(1 * sizeof(float));;
x1406[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1406, x1406, in_desc, x1397, out_desc, x1403, sbmv_desc, x878,
x614, x383, x326, 1.0E-5));
};
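// Residual add: the projected shortcut x1403 is accumulated into the main-path
// batch-norm output x1391, and the ReLU below produces the block output x1418.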
float* x1409 = (float*)myMalloc(1 * sizeof(float));;
x1409[0] = 1.0f;
float* x1411 = (float*)myMalloc(1 * sizeof(float));;
x1411[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1409, bias_desc, x1403, x1411, out_desc, x1391));
};
float* x1414 = (float*)myMalloc(1 * sizeof(float));;
x1414[0] = 0.0f;
float* x1416 = (float*)myMalloc(1 * sizeof(float));;
x1416[0] = 1.0f;
float* x1418 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1416, x_desc, x1391, x1414, x_desc, x1418));
};
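// Identity bottleneck unit at 8x8 resolution: a 1x1 convolution 512 -> 128
// (x1418 * x1025 -> x1420), a 3x3 convolution 128 -> 128, and a 1x1 expansion back
// to 512 channels, interleaved with batch norm and ReLU; the block input x1418 is
// added back onto the final batch-norm output before the closing ReLU.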
float* x1420 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1421 = (float*)myMalloc(1 * sizeof(float));;
x1421[0] = 0.0f;
float* x1423 = (float*)myMalloc(1 * sizeof(float));;
x1423[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1423, in_desc, x1418, filt_desc, x1025,
conv_desc, algo, ws_data, ws_size,
x1421, out_desc, x1420));
};
float* x1426 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1427 = (float*)myMalloc(1 * sizeof(float));;
x1427[0] = 0.0f;
float* x1429 = (float*)myMalloc(1 * sizeof(float));;
x1429[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1429, x1429, in_desc, x1420, out_desc, x1426, sbmv_desc, x923,
x308, x557, x788, 1.0E-5));
};
float* x1432 = (float*)myMalloc(1 * sizeof(float));;
x1432[0] = 0.0f;
float* x1434 = (float*)myMalloc(1 * sizeof(float));;
x1434[0] = 1.0f;
float* x1436 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1434, x_desc, x1426, x1432, x_desc, x1436));
};
float* x1438 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1439 = (float*)myMalloc(1 * sizeof(float));;
x1439[0] = 0.0f;
float* x1441 = (float*)myMalloc(1 * sizeof(float));;
x1441[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1441, in_desc, x1436, filt_desc, x962,
conv_desc, algo, ws_data, ws_size,
x1439, out_desc, x1438));
};
float* x1444 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1445 = (float*)myMalloc(1 * sizeof(float));;
x1445[0] = 0.0f;
float* x1447 = (float*)myMalloc(1 * sizeof(float));;
x1447[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1447, x1447, in_desc, x1438, out_desc, x1444, sbmv_desc, x281,
x542, x362, x932, 1.0E-5));
};
float* x1450 = (float*)myMalloc(1 * sizeof(float));;
x1450[0] = 0.0f;
float* x1452 = (float*)myMalloc(1 * sizeof(float));;
x1452[0] = 1.0f;
float* x1454 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1452, x_desc, x1444, x1450, x_desc, x1454));
};
float* x1456 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1457 = (float*)myMalloc(1 * sizeof(float));;
x1457[0] = 0.0f;
float* x1459 = (float*)myMalloc(1 * sizeof(float));;
x1459[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1459, in_desc, x1454, filt_desc, x590,
conv_desc, algo, ws_data, ws_size,
x1457, out_desc, x1456));
};
float* x1462 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1463 = (float*)myMalloc(1 * sizeof(float));;
x1463[0] = 0.0f;
float* x1465 = (float*)myMalloc(1 * sizeof(float));;
x1465[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1465, x1465, in_desc, x1456, out_desc, x1462, sbmv_desc, x413,
x995, x698, x521, 1.0E-5));
};
float* x1468 = (float*)myMalloc(1 * sizeof(float));;
x1468[0] = 1.0f;
float* x1470 = (float*)myMalloc(1 * sizeof(float));;
x1470[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1468, bias_desc, x1418, x1470, out_desc, x1462));
};
float* x1473 = (float*)myMalloc(1 * sizeof(float));;
x1473[0] = 0.0f;
float* x1475 = (float*)myMalloc(1 * sizeof(float));;
x1475[0] = 1.0f;
float* x1477 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1475, x_desc, x1462, x1473, x_desc, x1477));
};
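// Second identity bottleneck unit at 8x8: the same 512 -> 128 -> 128 -> 512 pattern
// (filters x845, x830, x380), with the block input x1477 added back before the
// final ReLU.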
float* x1479 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1480 = (float*)myMalloc(1 * sizeof(float));;
x1480[0] = 0.0f;
float* x1482 = (float*)myMalloc(1 * sizeof(float));;
x1482[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1482, in_desc, x1477, filt_desc, x845,
conv_desc, algo, ws_data, ws_size,
x1480, out_desc, x1479));
};
float* x1485 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1486 = (float*)myMalloc(1 * sizeof(float));;
x1486[0] = 0.0f;
float* x1488 = (float*)myMalloc(1 * sizeof(float));;
x1488[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1488, x1488, in_desc, x1479, out_desc, x1485, sbmv_desc, x392,
x767, x593, x284, 1.0E-5));
};
float* x1491 = (float*)myMalloc(1 * sizeof(float));;
x1491[0] = 0.0f;
float* x1493 = (float*)myMalloc(1 * sizeof(float));;
x1493[0] = 1.0f;
float* x1495 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1493, x_desc, x1485, x1491, x_desc, x1495));
};
float* x1497 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1498 = (float*)myMalloc(1 * sizeof(float));;
x1498[0] = 0.0f;
float* x1500 = (float*)myMalloc(1 * sizeof(float));;
x1500[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1500, in_desc, x1495, filt_desc, x830,
conv_desc, algo, ws_data, ws_size,
x1498, out_desc, x1497));
};
float* x1503 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1504 = (float*)myMalloc(1 * sizeof(float));;
x1504[0] = 0.0f;
float* x1506 = (float*)myMalloc(1 * sizeof(float));;
x1506[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1506, x1506, in_desc, x1497, out_desc, x1503, sbmv_desc, x638,
x440, x908, x1055, 1.0E-5));
};
float* x1509 = (float*)myMalloc(1 * sizeof(float));;
x1509[0] = 0.0f;
float* x1511 = (float*)myMalloc(1 * sizeof(float));;
x1511[0] = 1.0f;
float* x1513 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1511, x_desc, x1503, x1509, x_desc, x1513));
};
float* x1515 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1516 = (float*)myMalloc(1 * sizeof(float));;
x1516[0] = 0.0f;
float* x1518 = (float*)myMalloc(1 * sizeof(float));;
x1518[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1518, in_desc, x1513, filt_desc, x380,
conv_desc, algo, ws_data, ws_size,
x1516, out_desc, x1515));
};
float* x1521 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1522 = (float*)myMalloc(1 * sizeof(float));;
x1522[0] = 0.0f;
float* x1524 = (float*)myMalloc(1 * sizeof(float));;
x1524[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1524, x1524, in_desc, x1515, out_desc, x1521, sbmv_desc, x758,
x503, x332, x926, 1.0E-5));
};
float* x1527 = (float*)myMalloc(1 * sizeof(float));;
x1527[0] = 1.0f;
float* x1529 = (float*)myMalloc(1 * sizeof(float));;
x1529[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1527, bias_desc, x1477, x1529, out_desc, x1521));
};
float* x1532 = (float*)myMalloc(1 * sizeof(float));;
x1532[0] = 0.0f;
float* x1534 = (float*)myMalloc(1 * sizeof(float));;
x1534[0] = 1.0f;
float* x1536 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1534, x_desc, x1521, x1532, x_desc, x1536));
};
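// Third identity bottleneck unit at 8x8: 512 -> 128 -> 128 -> 512 (filters x653,
// x752, x422), with the block input x1536 added back before the final ReLU.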
float* x1538 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1539 = (float*)myMalloc(1 * sizeof(float));;
x1539[0] = 0.0f;
float* x1541 = (float*)myMalloc(1 * sizeof(float));;
x1541[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1541, in_desc, x1536, filt_desc, x653,
conv_desc, algo, ws_data, ws_size,
x1539, out_desc, x1538));
};
float* x1544 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1545 = (float*)myMalloc(1 * sizeof(float));;
x1545[0] = 0.0f;
float* x1547 = (float*)myMalloc(1 * sizeof(float));;
x1547[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1547, x1547, in_desc, x1538, out_desc, x1544, sbmv_desc, x374,
x983, x965, x1040, 1.0E-5));
};
float* x1550 = (float*)myMalloc(1 * sizeof(float));;
x1550[0] = 0.0f;
float* x1552 = (float*)myMalloc(1 * sizeof(float));;
x1552[0] = 1.0f;
float* x1554 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1552, x_desc, x1544, x1550, x_desc, x1554));
};
float* x1556 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1557 = (float*)myMalloc(1 * sizeof(float));;
x1557[0] = 0.0f;
float* x1559 = (float*)myMalloc(1 * sizeof(float));;
x1559[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1559, in_desc, x1554, filt_desc, x752,
conv_desc, algo, ws_data, ws_size,
x1557, out_desc, x1556));
};
float* x1562 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1563 = (float*)myMalloc(1 * sizeof(float));;
x1563[0] = 0.0f;
float* x1565 = (float*)myMalloc(1 * sizeof(float));;
x1565[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1565, x1565, in_desc, x1556, out_desc, x1562, sbmv_desc, x494,
x371, x1061, x701, 1.0E-5));
};
float* x1568 = (float*)myMalloc(1 * sizeof(float));;
x1568[0] = 0.0f;
float* x1570 = (float*)myMalloc(1 * sizeof(float));;
x1570[0] = 1.0f;
float* x1572 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1570, x_desc, x1562, x1568, x_desc, x1572));
};
float* x1574 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1575 = (float*)myMalloc(1 * sizeof(float));;
x1575[0] = 0.0f;
float* x1577 = (float*)myMalloc(1 * sizeof(float));;
x1577[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1577, in_desc, x1572, filt_desc, x422,
conv_desc, algo, ws_data, ws_size,
x1575, out_desc, x1574));
};
float* x1580 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1581 = (float*)myMalloc(1 * sizeof(float));;
x1581[0] = 0.0f;
float* x1583 = (float*)myMalloc(1 * sizeof(float));;
x1583[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1583, x1583, in_desc, x1574, out_desc, x1580, sbmv_desc, x725,
x419, x314, x959, 1.0E-5));
};
float* x1586 = (float*)myMalloc(1 * sizeof(float));;
x1586[0] = 1.0f;
float* x1588 = (float*)myMalloc(1 * sizeof(float));;
x1588[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1586, bias_desc, x1536, x1588, out_desc, x1580));
};
float* x1591 = (float*)myMalloc(1 * sizeof(float));;
x1591[0] = 0.0f;
float* x1593 = (float*)myMalloc(1 * sizeof(float));;
x1593[0] = 1.0f;
float* x1595 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1593, x_desc, x1580, x1591, x_desc, x1595));
};
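// Start of the next, downsampling stage: a 1x1 convolution reduces 512 -> 256
// channels at 8x8 (x1595 * x797 -> x1597), followed by batch norm and ReLU; the
// strided 3x3 convolution below halves the spatial size to 4x4.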
float* x1597 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1598 = (float*)myMalloc(1 * sizeof(float));;
x1598[0] = 0.0f;
float* x1600 = (float*)myMalloc(1 * sizeof(float));;
x1600[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1600, in_desc, x1595, filt_desc, x797,
conv_desc, algo, ws_data, ws_size,
x1598, out_desc, x1597));
};
float* x1603 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1604 = (float*)myMalloc(1 * sizeof(float));;
x1604[0] = 0.0f;
float* x1606 = (float*)myMalloc(1 * sizeof(float));;
x1606[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1606, x1606, in_desc, x1597, out_desc, x1603, sbmv_desc, x1067,
x320, x650, x851, 1.0E-5));
};
float* x1609 = (float*)myMalloc(1 * sizeof(float));;
x1609[0] = 0.0f;
float* x1611 = (float*)myMalloc(1 * sizeof(float));;
x1611[0] = 1.0f;
float* x1613 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1611, x_desc, x1603, x1609, x_desc, x1613));
};
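// Strided 3x3 convolution: 256 -> 256 channels, padding 1, stride 2, shrinking the
// feature map from 8x8 to 4x4 (x1613 * x782 -> x1615).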
float* x1615 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1616 = (float*)myMalloc(1 * sizeof(float));;
x1616[0] = 0.0f;
float* x1618 = (float*)myMalloc(1 * sizeof(float));;
x1618[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1618, in_desc, x1613, filt_desc, x782,
conv_desc, algo, ws_data, ws_size,
x1616, out_desc, x1615));
};
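// Batch norm (inference) on the 64x256x4x4 output: x1615 -> x1621.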
float* x1621 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1622 = (float*)myMalloc(1 * sizeof(float));;
x1622[0] = 0.0f;
float* x1624 = (float*)myMalloc(1 * sizeof(float));;
x1624[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1624, x1624, in_desc, x1615, out_desc, x1621, sbmv_desc, x581,
x305, x944, x554, 1.0E-5));
};
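// ReLU: x1621 -> x1631.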
float* x1627 = (float*)myMalloc(1 * sizeof(float));;
x1627[0] = 0.0f;
float* x1629 = (float*)myMalloc(1 * sizeof(float));;
x1629[0] = 1.0f;
float* x1631 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1629, x_desc, x1621, x1627, x_desc, x1631));
};
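// 1x1 convolution expanding 256 -> 1024 channels: x1631 -> x1633.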
float* x1633 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1634 = (float*)myMalloc(1 * sizeof(float));;
x1634[0] = 0.0f;
float* x1636 = (float*)myMalloc(1 * sizeof(float));;
x1636[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1636, in_desc, x1631, filt_desc, x1064,
conv_desc, algo, ws_data, ws_size,
x1634, out_desc, x1633));
};
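// Batch norm (inference) on the 64x1024x4x4 main-branch output: x1633 -> x1639.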
float* x1639 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1640 = (float*)myMalloc(1 * sizeof(float));;
x1640[0] = 0.0f;
float* x1642 = (float*)myMalloc(1 * sizeof(float));;
x1642[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1642, x1642, in_desc, x1633, out_desc, x1639, sbmv_desc, x311,
x608, x905, x1058, 1.0E-5));
};
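// Projection shortcut: 1x1 convolution with stride 2 on the unit input x1595 (64x512x8x8), 512 -> 1024 channels: -> x1645.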
float* x1645 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1646 = (float*)myMalloc(1 * sizeof(float));;
x1646[0] = 0.0f;
float* x1648 = (float*)myMalloc(1 * sizeof(float));;
x1648[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1648, in_desc, x1595, filt_desc, x482,
conv_desc, algo, ws_data, ws_size,
x1646, out_desc, x1645));
};
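// Batch norm (inference) on the shortcut branch: x1645 -> x1651.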
float* x1651 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1652 = (float*)myMalloc(1 * sizeof(float));;
x1652[0] = 0.0f;
float* x1654 = (float*)myMalloc(1 * sizeof(float));;
x1654[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1654, x1654, in_desc, x1645, out_desc, x1651, sbmv_desc, x344,
x917, x515, x890, 1.0E-5));
};
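// Residual add: accumulate the shortcut branch x1651 into the main branch x1639 (cudnnAddTensor with alpha = beta = 1).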
float* x1657 = (float*)myMalloc(1 * sizeof(float));;
x1657[0] = 1.0f;
float* x1659 = (float*)myMalloc(1 * sizeof(float));;
x1659[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1657, bias_desc, x1651, x1659, out_desc, x1639));
};
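// Final ReLU of the unit: x1639 -> x1666, which becomes the input of the next residual unit.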
float* x1662 = (float*)myMalloc(1 * sizeof(float));;
x1662[0] = 0.0f;
float* x1664 = (float*)myMalloc(1 * sizeof(float));;
x1664[0] = 1.0f;
float* x1666 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1664, x_desc, x1639, x1662, x_desc, x1666));
};
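// Identity residual unit (bottleneck pattern, input x1666, 64x1024x4x4):
// 1x1 conv 1024 -> 256, BN, ReLU; 3x3 conv 256 -> 256 (pad 1), BN, ReLU; 1x1 conv 256 -> 1024, BN;
// then add the unit input and apply ReLU.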
float* x1668 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1669 = (float*)myMalloc(1 * sizeof(float));;
x1669[0] = 0.0f;
float* x1671 = (float*)myMalloc(1 * sizeof(float));;
x1671[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1671, in_desc, x1666, filt_desc, x296,
conv_desc, algo, ws_data, ws_size,
x1669, out_desc, x1668));
};
float* x1674 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1675 = (float*)myMalloc(1 * sizeof(float));;
x1675[0] = 0.0f;
float* x1677 = (float*)myMalloc(1 * sizeof(float));;
x1677[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1677, x1677, in_desc, x1668, out_desc, x1674, sbmv_desc, x347,
x914, x1034, x728, 1.0E-5));
};
float* x1680 = (float*)myMalloc(1 * sizeof(float));;
x1680[0] = 0.0f;
float* x1682 = (float*)myMalloc(1 * sizeof(float));;
x1682[0] = 1.0f;
float* x1684 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1682, x_desc, x1674, x1680, x_desc, x1684));
};
float* x1686 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1687 = (float*)myMalloc(1 * sizeof(float));;
x1687[0] = 0.0f;
float* x1689 = (float*)myMalloc(1 * sizeof(float));;
x1689[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1689, in_desc, x1684, filt_desc, x350,
conv_desc, algo, ws_data, ws_size,
x1687, out_desc, x1686));
};
float* x1692 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1693 = (float*)myMalloc(1 * sizeof(float));;
x1693[0] = 0.0f;
float* x1695 = (float*)myMalloc(1 * sizeof(float));;
x1695[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1695, x1695, in_desc, x1686, out_desc, x1692, sbmv_desc, x1070,
x545, x857, x968, 1.0E-5));
};
float* x1698 = (float*)myMalloc(1 * sizeof(float));;
x1698[0] = 0.0f;
float* x1700 = (float*)myMalloc(1 * sizeof(float));;
x1700[0] = 1.0f;
float* x1702 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1700, x_desc, x1692, x1698, x_desc, x1702));
};
float* x1704 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1705 = (float*)myMalloc(1 * sizeof(float));;
x1705[0] = 0.0f;
float* x1707 = (float*)myMalloc(1 * sizeof(float));;
x1707[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1707, in_desc, x1702, filt_desc, x425,
conv_desc, algo, ws_data, ws_size,
x1705, out_desc, x1704));
};
float* x1710 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1711 = (float*)myMalloc(1 * sizeof(float));;
x1711[0] = 0.0f;
float* x1713 = (float*)myMalloc(1 * sizeof(float));;
x1713[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1713, x1713, in_desc, x1704, out_desc, x1710, sbmv_desc, x317,
x953, x803, x686, 1.0E-5));
};
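// Residual add: accumulate the unit input x1666 into the main branch x1710.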
float* x1716 = (float*)myMalloc(1 * sizeof(float));;
x1716[0] = 1.0f;
float* x1718 = (float*)myMalloc(1 * sizeof(float));;
x1718[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1716, bias_desc, x1666, x1718, out_desc, x1710));
};
float* x1721 = (float*)myMalloc(1 * sizeof(float));;
x1721[0] = 0.0f;
float* x1723 = (float*)myMalloc(1 * sizeof(float));;
x1723[0] = 1.0f;
float* x1725 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1723, x_desc, x1710, x1721, x_desc, x1725));
};
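// Identity residual unit (input x1725): same 1x1 / 3x3 / 1x1 bottleneck sequence with residual add and ReLU.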
float* x1727 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1728 = (float*)myMalloc(1 * sizeof(float));;
x1728[0] = 0.0f;
float* x1730 = (float*)myMalloc(1 * sizeof(float));;
x1730[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1730, in_desc, x1725, filt_desc, x911,
conv_desc, algo, ws_data, ws_size,
x1728, out_desc, x1727));
};
float* x1733 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1734 = (float*)myMalloc(1 * sizeof(float));;
x1734[0] = 0.0f;
float* x1736 = (float*)myMalloc(1 * sizeof(float));;
x1736[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1736, x1736, in_desc, x1727, out_desc, x1733, sbmv_desc, x644,
x848, x791, x779, 1.0E-5));
};
float* x1739 = (float*)myMalloc(1 * sizeof(float));;
x1739[0] = 0.0f;
float* x1741 = (float*)myMalloc(1 * sizeof(float));;
x1741[0] = 1.0f;
float* x1743 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1741, x_desc, x1733, x1739, x_desc, x1743));
};
float* x1745 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1746 = (float*)myMalloc(1 * sizeof(float));;
x1746[0] = 0.0f;
float* x1748 = (float*)myMalloc(1 * sizeof(float));;
x1748[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1748, in_desc, x1743, filt_desc, x299,
conv_desc, algo, ws_data, ws_size,
x1746, out_desc, x1745));
};
float* x1751 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1752 = (float*)myMalloc(1 * sizeof(float));;
x1752[0] = 0.0f;
float* x1754 = (float*)myMalloc(1 * sizeof(float));;
x1754[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1754, x1754, in_desc, x1745, out_desc, x1751, sbmv_desc, x941,
x833, x629, x446, 1.0E-5));
};
float* x1757 = (float*)myMalloc(1 * sizeof(float));;
x1757[0] = 0.0f;
float* x1759 = (float*)myMalloc(1 * sizeof(float));;
x1759[0] = 1.0f;
float* x1761 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1759, x_desc, x1751, x1757, x_desc, x1761));
};
float* x1763 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1764 = (float*)myMalloc(1 * sizeof(float));;
x1764[0] = 0.0f;
float* x1766 = (float*)myMalloc(1 * sizeof(float));;
x1766[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1766, in_desc, x1761, filt_desc, x605,
conv_desc, algo, ws_data, ws_size,
x1764, out_desc, x1763));
};
float* x1769 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1770 = (float*)myMalloc(1 * sizeof(float));;
x1770[0] = 0.0f;
float* x1772 = (float*)myMalloc(1 * sizeof(float));;
x1772[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1772, x1772, in_desc, x1763, out_desc, x1769, sbmv_desc, x1046,
x428, x677, x821, 1.0E-5));
};
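// Residual add: accumulate the unit input x1725 into the main branch x1769.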
float* x1775 = (float*)myMalloc(1 * sizeof(float));;
x1775[0] = 1.0f;
float* x1777 = (float*)myMalloc(1 * sizeof(float));;
x1777[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1775, bias_desc, x1725, x1777, out_desc, x1769));
};
float* x1780 = (float*)myMalloc(1 * sizeof(float));;
x1780[0] = 0.0f;
float* x1782 = (float*)myMalloc(1 * sizeof(float));;
x1782[0] = 1.0f;
float* x1784 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1782, x_desc, x1769, x1780, x_desc, x1784));
};
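// Identity residual unit (input x1784): same 1x1 / 3x3 / 1x1 bottleneck sequence with residual add and ReLU.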
float* x1786 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1787 = (float*)myMalloc(1 * sizeof(float));;
x1787[0] = 0.0f;
float* x1789 = (float*)myMalloc(1 * sizeof(float));;
x1789[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1789, in_desc, x1784, filt_desc, x275,
conv_desc, algo, ws_data, ws_size,
x1787, out_desc, x1786));
};
float* x1792 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1793 = (float*)myMalloc(1 * sizeof(float));;
x1793[0] = 0.0f;
float* x1795 = (float*)myMalloc(1 * sizeof(float));;
x1795[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1795, x1795, in_desc, x1786, out_desc, x1792, sbmv_desc, x533,
x980, x746, x551, 1.0E-5));
};
float* x1798 = (float*)myMalloc(1 * sizeof(float));;
x1798[0] = 0.0f;
float* x1800 = (float*)myMalloc(1 * sizeof(float));;
x1800[0] = 1.0f;
float* x1802 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1800, x_desc, x1792, x1798, x_desc, x1802));
};
float* x1804 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1805 = (float*)myMalloc(1 * sizeof(float));;
x1805[0] = 0.0f;
float* x1807 = (float*)myMalloc(1 * sizeof(float));;
x1807[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1807, in_desc, x1802, filt_desc, x1004,
conv_desc, algo, ws_data, ws_size,
x1805, out_desc, x1804));
};
float* x1810 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1811 = (float*)myMalloc(1 * sizeof(float));;
x1811[0] = 0.0f;
float* x1813 = (float*)myMalloc(1 * sizeof(float));;
x1813[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1813, x1813, in_desc, x1804, out_desc, x1810, sbmv_desc, x479,
x665, x815, x947, 1.0E-5));
};
float* x1816 = (float*)myMalloc(1 * sizeof(float));;
x1816[0] = 0.0f;
float* x1818 = (float*)myMalloc(1 * sizeof(float));;
x1818[0] = 1.0f;
float* x1820 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1818, x_desc, x1810, x1816, x_desc, x1820));
};
float* x1822 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1823 = (float*)myMalloc(1 * sizeof(float));;
x1823[0] = 0.0f;
float* x1825 = (float*)myMalloc(1 * sizeof(float));;
x1825[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1825, in_desc, x1820, filt_desc, x524,
conv_desc, algo, ws_data, ws_size,
x1823, out_desc, x1822));
};
float* x1828 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1829 = (float*)myMalloc(1 * sizeof(float));;
x1829[0] = 0.0f;
float* x1831 = (float*)myMalloc(1 * sizeof(float));;
x1831[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1831, x1831, in_desc, x1822, out_desc, x1828, sbmv_desc, x971,
x695, x950, x740, 1.0E-5));
};
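// Residual add: accumulate the unit input x1784 into the main branch x1828.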
float* x1834 = (float*)myMalloc(1 * sizeof(float));;
x1834[0] = 1.0f;
float* x1836 = (float*)myMalloc(1 * sizeof(float));;
x1836[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1834, bias_desc, x1784, x1836, out_desc, x1828));
};
float* x1839 = (float*)myMalloc(1 * sizeof(float));;
x1839[0] = 0.0f;
float* x1841 = (float*)myMalloc(1 * sizeof(float));;
x1841[0] = 1.0f;
float* x1843 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1841, x_desc, x1828, x1839, x_desc, x1843));
};
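// Identity residual unit (input x1843): same 1x1 / 3x3 / 1x1 bottleneck sequence with residual add and ReLU.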
float* x1845 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1846 = (float*)myMalloc(1 * sizeof(float));;
x1846[0] = 0.0f;
float* x1848 = (float*)myMalloc(1 * sizeof(float));;
x1848[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1848, in_desc, x1843, filt_desc, x323,
conv_desc, algo, ws_data, ws_size,
x1846, out_desc, x1845));
};
float* x1851 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1852 = (float*)myMalloc(1 * sizeof(float));;
x1852[0] = 0.0f;
float* x1854 = (float*)myMalloc(1 * sizeof(float));;
x1854[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1854, x1854, in_desc, x1845, out_desc, x1851, sbmv_desc, x488,
x812, x1019, x464, 1.0E-5));
};
float* x1857 = (float*)myMalloc(1 * sizeof(float));;
x1857[0] = 0.0f;
float* x1859 = (float*)myMalloc(1 * sizeof(float));;
x1859[0] = 1.0f;
float* x1861 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1859, x_desc, x1851, x1857, x_desc, x1861));
};
float* x1863 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1864 = (float*)myMalloc(1 * sizeof(float));;
x1864[0] = 0.0f;
float* x1866 = (float*)myMalloc(1 * sizeof(float));;
x1866[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1866, in_desc, x1861, filt_desc, x1043,
conv_desc, algo, ws_data, ws_size,
x1864, out_desc, x1863));
};
float* x1869 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1870 = (float*)myMalloc(1 * sizeof(float));;
x1870[0] = 0.0f;
float* x1872 = (float*)myMalloc(1 * sizeof(float));;
x1872[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1872, x1872, in_desc, x1863, out_desc, x1869, sbmv_desc, x761,
x584, x1007, x569, 1.0E-5));
};
float* x1875 = (float*)myMalloc(1 * sizeof(float));;
x1875[0] = 0.0f;
float* x1877 = (float*)myMalloc(1 * sizeof(float));;
x1877[0] = 1.0f;
float* x1879 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1877, x_desc, x1869, x1875, x_desc, x1879));
};
float* x1881 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1882 = (float*)myMalloc(1 * sizeof(float));;
x1882[0] = 0.0f;
float* x1884 = (float*)myMalloc(1 * sizeof(float));;
x1884[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1884, in_desc, x1879, filt_desc, x920,
conv_desc, algo, ws_data, ws_size,
x1882, out_desc, x1881));
};
float* x1887 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1888 = (float*)myMalloc(1 * sizeof(float));;
x1888[0] = 0.0f;
float* x1890 = (float*)myMalloc(1 * sizeof(float));;
x1890[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1890, x1890, in_desc, x1881, out_desc, x1887, sbmv_desc, x434,
x617, x884, x1073, 1.0E-5));
};
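// Residual add: accumulate the unit input x1843 into the main branch x1887.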
float* x1893 = (float*)myMalloc(1 * sizeof(float));;
x1893[0] = 1.0f;
float* x1895 = (float*)myMalloc(1 * sizeof(float));;
x1895[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1893, bias_desc, x1843, x1895, out_desc, x1887));
};
float* x1898 = (float*)myMalloc(1 * sizeof(float));;
x1898[0] = 0.0f;
float* x1900 = (float*)myMalloc(1 * sizeof(float));;
x1900[0] = 1.0f;
float* x1902 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1900, x_desc, x1887, x1898, x_desc, x1902));
};
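// Identity residual unit (input x1902): same 1x1 / 3x3 / 1x1 bottleneck sequence with residual add and ReLU.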
float* x1904 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1905 = (float*)myMalloc(1 * sizeof(float));;
x1905[0] = 0.0f;
float* x1907 = (float*)myMalloc(1 * sizeof(float));;
x1907[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1907, in_desc, x1902, filt_desc, x710,
conv_desc, algo, ws_data, ws_size,
x1905, out_desc, x1904));
};
float* x1910 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1911 = (float*)myMalloc(1 * sizeof(float));;
x1911[0] = 0.0f;
float* x1913 = (float*)myMalloc(1 * sizeof(float));;
x1913[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1913, x1913, in_desc, x1904, out_desc, x1910, sbmv_desc, x512,
x1016, x497, x785, 1.0E-5));
};
float* x1916 = (float*)myMalloc(1 * sizeof(float));;
x1916[0] = 0.0f;
float* x1918 = (float*)myMalloc(1 * sizeof(float));;
x1918[0] = 1.0f;
float* x1920 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1918, x_desc, x1910, x1916, x_desc, x1920));
};
float* x1922 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1923 = (float*)myMalloc(1 * sizeof(float));;
x1923[0] = 0.0f;
float* x1925 = (float*)myMalloc(1 * sizeof(float));;
x1925[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1925, in_desc, x1920, filt_desc, x935,
conv_desc, algo, ws_data, ws_size,
x1923, out_desc, x1922));
};
float* x1928 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1929 = (float*)myMalloc(1 * sizeof(float));;
x1929[0] = 0.0f;
float* x1931 = (float*)myMalloc(1 * sizeof(float));;
x1931[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1931, x1931, in_desc, x1922, out_desc, x1928, sbmv_desc, x680,
x824, x467, x977, 1.0E-5));
};
float* x1934 = (float*)myMalloc(1 * sizeof(float));;
x1934[0] = 0.0f;
float* x1936 = (float*)myMalloc(1 * sizeof(float));;
x1936[0] = 1.0f;
float* x1938 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1936, x_desc, x1928, x1934, x_desc, x1938));
};
float* x1940 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1941 = (float*)myMalloc(1 * sizeof(float));;
x1941[0] = 0.0f;
float* x1943 = (float*)myMalloc(1 * sizeof(float));;
x1943[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1943, in_desc, x1938, filt_desc, x548,
conv_desc, algo, ws_data, ws_size,
x1941, out_desc, x1940));
};
float* x1946 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1947 = (float*)myMalloc(1 * sizeof(float));;
x1947[0] = 0.0f;
float* x1949 = (float*)myMalloc(1 * sizeof(float));;
x1949[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1949, x1949, in_desc, x1940, out_desc, x1946, sbmv_desc, x1001,
x536, x623, x806, 1.0E-5));
};
float* x1952 = (float*)myMalloc(1 * sizeof(float));;
x1952[0] = 1.0f;
float* x1954 = (float*)myMalloc(1 * sizeof(float));;
x1954[0] = 1.0f;
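// Residual add: cudnnAddTensor computes C = alpha*A + beta*C, here accumulating the earlier
// activation x1902 (the shortcut branch of this bottleneck block) into the batch-norm output
// x1946 before the final ReLU.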
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1952, bias_desc, x1902, x1954, out_desc, x1946));
};
float* x1957 = (float*)myMalloc(1 * sizeof(float));;
x1957[0] = 0.0f;
float* x1959 = (float*)myMalloc(1 * sizeof(float));;
x1959[0] = 1.0f;
float* x1961 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1959, x_desc, x1946, x1957, x_desc, x1961));
};
float* x1963 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1964 = (float*)myMalloc(1 * sizeof(float));;
x1964[0] = 0.0f;
float* x1966 = (float*)myMalloc(1 * sizeof(float));;
x1966[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1966, in_desc, x1961, filt_desc, x674,
conv_desc, algo, ws_data, ws_size,
x1964, out_desc, x1963));
};
float* x1969 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1970 = (float*)myMalloc(1 * sizeof(float));;
x1970[0] = 0.0f;
float* x1972 = (float*)myMalloc(1 * sizeof(float));;
x1972[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1972, x1972, in_desc, x1963, out_desc, x1969, sbmv_desc, x860,
x929, x458, x620, 1.0E-5));
};
float* x1975 = (float*)myMalloc(1 * sizeof(float));;
x1975[0] = 0.0f;
float* x1977 = (float*)myMalloc(1 * sizeof(float));;
x1977[0] = 1.0f;
float* x1979 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1977, x_desc, x1969, x1975, x_desc, x1979));
};
float* x1981 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x1982 = (float*)myMalloc(1 * sizeof(float));;
x1982[0] = 0.0f;
float* x1984 = (float*)myMalloc(1 * sizeof(float));;
x1984[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1984, in_desc, x1979, filt_desc, x359,
conv_desc, algo, ws_data, ws_size,
x1982, out_desc, x1981));
};
float* x1987 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x1988 = (float*)myMalloc(1 * sizeof(float));;
x1988[0] = 0.0f;
float* x1990 = (float*)myMalloc(1 * sizeof(float));;
x1990[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1990, x1990, in_desc, x1981, out_desc, x1987, sbmv_desc, x872,
x734, x596, x407, 1.0E-5));
};
float* x1993 = (float*)myMalloc(1 * sizeof(float));;
x1993[0] = 0.0f;
float* x1995 = (float*)myMalloc(1 * sizeof(float));;
x1995[0] = 1.0f;
float* x1997 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1995, x_desc, x1987, x1993, x_desc, x1997));
};
float* x1999 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2000 = (float*)myMalloc(1 * sizeof(float));;
x2000[0] = 0.0f;
float* x2002 = (float*)myMalloc(1 * sizeof(float));;
x2002[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2002, in_desc, x1997, filt_desc, x893,
conv_desc, algo, ws_data, ws_size,
x2000, out_desc, x1999));
};
float* x2005 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2006 = (float*)myMalloc(1 * sizeof(float));;
x2006[0] = 0.0f;
float* x2008 = (float*)myMalloc(1 * sizeof(float));;
x2008[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2008, x2008, in_desc, x1999, out_desc, x2005, sbmv_desc, x974,
x443, x602, x836, 1.0E-5));
};
float* x2011 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2012 = (float*)myMalloc(1 * sizeof(float));;
x2012[0] = 0.0f;
float* x2014 = (float*)myMalloc(1 * sizeof(float));;
x2014[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2014, in_desc, x1961, filt_desc, x899,
conv_desc, algo, ws_data, ws_size,
x2012, out_desc, x2011));
};
float* x2017 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2018 = (float*)myMalloc(1 * sizeof(float));;
x2018[0] = 0.0f;
float* x2020 = (float*)myMalloc(1 * sizeof(float));;
x2020[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2020, x2020, in_desc, x2011, out_desc, x2017, sbmv_desc, x776,
x578, x449, x632, 1.0E-5));
};
float* x2023 = (float*)myMalloc(1 * sizeof(float));;
x2023[0] = 1.0f;
float* x2025 = (float*)myMalloc(1 * sizeof(float));;
x2025[0] = 1.0f;
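// Downsampling bottleneck: x2017 is the 1x1 stride-2 projection shortcut of x1961 (the conv +
// batch norm just above), added into the main-branch output x2005 via C = alpha*A + beta*C.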
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2023, bias_desc, x2017, x2025, out_desc, x2005));
};
float* x2028 = (float*)myMalloc(1 * sizeof(float));;
x2028[0] = 0.0f;
float* x2030 = (float*)myMalloc(1 * sizeof(float));;
x2030[0] = 1.0f;
float* x2032 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2030, x_desc, x2005, x2028, x_desc, x2032));
};
float* x2034 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2035 = (float*)myMalloc(1 * sizeof(float));;
x2035[0] = 0.0f;
float* x2037 = (float*)myMalloc(1 * sizeof(float));;
x2037[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2037, in_desc, x2032, filt_desc, x902,
conv_desc, algo, ws_data, ws_size,
x2035, out_desc, x2034));
};
float* x2040 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2041 = (float*)myMalloc(1 * sizeof(float));;
x2041[0] = 0.0f;
float* x2043 = (float*)myMalloc(1 * sizeof(float));;
x2043[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2043, x2043, in_desc, x2034, out_desc, x2040, sbmv_desc, x395,
x668, x719, x452, 1.0E-5));
};
float* x2046 = (float*)myMalloc(1 * sizeof(float));;
x2046[0] = 0.0f;
float* x2048 = (float*)myMalloc(1 * sizeof(float));;
x2048[0] = 1.0f;
float* x2050 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2048, x_desc, x2040, x2046, x_desc, x2050));
};
float* x2052 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2053 = (float*)myMalloc(1 * sizeof(float));;
x2053[0] = 0.0f;
float* x2055 = (float*)myMalloc(1 * sizeof(float));;
x2055[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2055, in_desc, x2050, filt_desc, x722,
conv_desc, algo, ws_data, ws_size,
x2053, out_desc, x2052));
};
float* x2058 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2059 = (float*)myMalloc(1 * sizeof(float));;
x2059[0] = 0.0f;
float* x2061 = (float*)myMalloc(1 * sizeof(float));;
x2061[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2061, x2061, in_desc, x2052, out_desc, x2058, sbmv_desc, x737,
x455, x671, x842, 1.0E-5));
};
float* x2064 = (float*)myMalloc(1 * sizeof(float));;
x2064[0] = 0.0f;
float* x2066 = (float*)myMalloc(1 * sizeof(float));;
x2066[0] = 1.0f;
float* x2068 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2066, x_desc, x2058, x2064, x_desc, x2068));
};
float* x2070 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2071 = (float*)myMalloc(1 * sizeof(float));;
x2071[0] = 0.0f;
float* x2073 = (float*)myMalloc(1 * sizeof(float));;
x2073[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2073, in_desc, x2068, filt_desc, x398,
conv_desc, algo, ws_data, ws_size,
x2071, out_desc, x2070));
};
float* x2076 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2077 = (float*)myMalloc(1 * sizeof(float));;
x2077[0] = 0.0f;
float* x2079 = (float*)myMalloc(1 * sizeof(float));;
x2079[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2079, x2079, in_desc, x2070, out_desc, x2076, sbmv_desc, x539,
x689, x461, x992, 1.0E-5));
};
float* x2082 = (float*)myMalloc(1 * sizeof(float));;
x2082[0] = 1.0f;
float* x2084 = (float*)myMalloc(1 * sizeof(float));;
x2084[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2082, bias_desc, x2032, x2084, out_desc, x2076));
};
float* x2087 = (float*)myMalloc(1 * sizeof(float));;
x2087[0] = 0.0f;
float* x2089 = (float*)myMalloc(1 * sizeof(float));;
x2089[0] = 1.0f;
float* x2091 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2089, x_desc, x2076, x2087, x_desc, x2091));
};
float* x2093 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2094 = (float*)myMalloc(1 * sizeof(float));;
x2094[0] = 0.0f;
float* x2096 = (float*)myMalloc(1 * sizeof(float));;
x2096[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2096, in_desc, x2091, filt_desc, x1052,
conv_desc, algo, ws_data, ws_size,
x2094, out_desc, x2093));
};
float* x2099 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2100 = (float*)myMalloc(1 * sizeof(float));;
x2100[0] = 0.0f;
float* x2102 = (float*)myMalloc(1 * sizeof(float));;
x2102[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2102, x2102, in_desc, x2093, out_desc, x2099, sbmv_desc, x302,
x491, x896, x1022, 1.0E-5));
};
float* x2105 = (float*)myMalloc(1 * sizeof(float));;
x2105[0] = 0.0f;
float* x2107 = (float*)myMalloc(1 * sizeof(float));;
x2107[0] = 1.0f;
float* x2109 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2107, x_desc, x2099, x2105, x_desc, x2109));
};
float* x2111 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2112 = (float*)myMalloc(1 * sizeof(float));;
x2112[0] = 0.0f;
float* x2114 = (float*)myMalloc(1 * sizeof(float));;
x2114[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2114, in_desc, x2109, filt_desc, x341,
conv_desc, algo, ws_data, ws_size,
x2112, out_desc, x2111));
};
float* x2117 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2118 = (float*)myMalloc(1 * sizeof(float));;
x2118[0] = 0.0f;
float* x2120 = (float*)myMalloc(1 * sizeof(float));;
x2120[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2120, x2120, in_desc, x2111, out_desc, x2117, sbmv_desc, x839,
x764, x293, x863, 1.0E-5));
};
float* x2123 = (float*)myMalloc(1 * sizeof(float));;
x2123[0] = 0.0f;
float* x2125 = (float*)myMalloc(1 * sizeof(float));;
x2125[0] = 1.0f;
float* x2127 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2125, x_desc, x2117, x2123, x_desc, x2127));
};
float* x2129 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2130 = (float*)myMalloc(1 * sizeof(float));;
x2130[0] = 0.0f;
float* x2132 = (float*)myMalloc(1 * sizeof(float));;
x2132[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2132, in_desc, x2127, filt_desc, x356,
conv_desc, algo, ws_data, ws_size,
x2130, out_desc, x2129));
};
float* x2135 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2136 = (float*)myMalloc(1 * sizeof(float));;
x2136[0] = 0.0f;
float* x2138 = (float*)myMalloc(1 * sizeof(float));;
x2138[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2138, x2138, in_desc, x2129, out_desc, x2135, sbmv_desc, x566,
x800, x1037, x626, 1.0E-5));
};
float* x2141 = (float*)myMalloc(1 * sizeof(float));;
x2141[0] = 1.0f;
float* x2143 = (float*)myMalloc(1 * sizeof(float));;
x2143[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2141, bias_desc, x2091, x2143, out_desc, x2135));
};
float* x2146 = (float*)myMalloc(1 * sizeof(float));;
x2146[0] = 0.0f;
float* x2148 = (float*)myMalloc(1 * sizeof(float));;
x2148[0] = 1.0f;
float* x2150 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2148, x_desc, x2135, x2146, x_desc, x2150));
};
float* x2152 = (float*)myMalloc(1 * sizeof(float));;
x2152[0] = 0.0f;
float* x2154 = (float*)myMalloc(1 * sizeof(float));;
x2154[0] = 1.0f;
float* x2156 = (float*)myGpuMalloc(131072 * sizeof(float));
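// Global average pooling: a 2x2 average window (stride 1, no padding) over the 2x2 feature map
// reduces each of the 2048 channels to a single value, giving a 64 x 2048 x 1 x 1 output.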
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 1, 1));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN,
2, 2, 0,
0, 1, 1
));
CUDNN_CALL(cudnnPoolingForward(
cudnnHandle,
poolingDesc,
x2154, in_desc, x2150, x2152, out_desc, x2156));
};
// resize to WrappedArray(64, 2048): the pooled 64 x 2048 x 1 x 1 tensor is viewed as a 64 x 2048 matrix
// gemm: WrappedArray(64, 2048) with Vector(10, 2048) -> 64 x 10 logits
float* x2160 = (float*)myGpuMalloc(640 * sizeof(float));
float* x2161 = (float*)myMalloc(1 * sizeof(float));;
x2161[0] = 0.0f;
float* x2163 = (float*)myMalloc(1 * sizeof(float));;
x2163[0] = 1.0f;
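// Final fully-connected layer as one GEMM: with the row-major 10 x 2048 weights x938 (OP_T) and
// the row-major 64 x 2048 pooled features x2156 (OP_N), the column-major 10 x 64 result is exactly
// the row-major 64 x 10 logits stored in x2160 (the bias is added next).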
CUBLAS_CALL(hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, 10, 64, 2048, x2163, x938, 2048, x2156, 2048, x2161, x2160, 10));
float* x2166 = (float*)myMalloc(1 * sizeof(float));;
x2166[0] = 1.0f;
float* x2168 = (float*)myMalloc(1 * sizeof(float));;
x2168[0] = 1.0f;
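// Add the 10-element fully-connected bias x401, broadcast across the 64 samples, onto the logits.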
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 10, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 10, 1, 1));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2166, bias_desc, x401, x2168, out_desc, x2160));
};
// Tensor 'toCPU' invocation.
float* x2172 = (float*)myMalloc(640 * sizeof(float));;
CUDA_CALL(hipMemcpy(x2172, x2160, 640 * sizeof(float), hipMemcpyDeviceToHost));
printf("output (size 64 x 10)\n");
float x2175 = 0.0f;
for(int x2177=0; x2177 < 640; x2177++) {
float x2178 = x2175;
float x2179 = x2172[x2177];
float x2180 = fabs(x2179);
float x2181 = fabs(x2178);
bool x2182 = x2180 > x2181;
float x2183;
if (x2182) {
x2183 = x2179;
} else {
x2183 = x2178;
}
x2175 = x2183;
}
float x2187 = x2175;
printf("Max Abs: %.5f || ",x2187);
for(int x2189=0; x2189 < 10; x2189++) {
float x2190 = x2172[x2189];
printf("%.5f ",x2190);
}
printf("\n");
assert(false && "stop");
}
// Backend cleanup.
CUBLAS_CALL(hipblasDestroy(cublasHandle));
CUDA_CALL(hipFree(gpuMallocBase));
CUDNN_CALL(cudnnDestroy(cudnnHandle));
}
/*****************************************
End of C Generated Code
*******************************************/
| 3147817946a6fec05536427adacf5cd9a915bb02.cu | #include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <functional>
#include <math.h>
#include <memory>
#include <random>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <cblas.h>
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cudnn.h>
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
int fsize(int fd) {
struct stat stat;
int res = fstat(fd, &stat);
return stat.st_size;
}
int printll(char *s) {
while (*s != '\n' && *s != ',' && *s != '\t') {
putchar(*s++);
}
return 0;
}
long hash(char *str0, int len) {
unsigned char *str = (unsigned char *)str0;
unsigned long hash = 5381;
int c;
while ((c = *str++) && len--)
hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
return hash;
}
long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; //
void *mallocBase = calloc(HEAP_SIZE_CPU, 1);
void *mallocAddr = mallocBase;
void *waterMark = mallocBase;
void *myMalloc(size_t bytes) {
void *res = mallocAddr;
mallocAddr = (void *)((char *)mallocAddr + bytes);
if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU)
fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n");
return res;
}
long HEAP_SIZE = 4294967304; // this is for GPU
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) {
long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec);
result->tv_sec = diff / 1000000;
result->tv_usec = diff % 1000000;
return (diff < 0);
}
#define CUDA_CALL(f) { \
cudaError_t err = (f); \
if (err != cudaSuccess) { \
fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \
cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define CUBLAS_CALL(f) { \
cublasStatus_t stat = (f); \
if (stat != CUBLAS_STATUS_SUCCESS) { \
fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \
stat, __FILE__, __LINE__); \
exit(stat); \
} \
}
void *gpuMallocBase;
void *gpuMallocAddr;
// Alignment boundary size, in bytes.
constexpr int N = 4; // 16
void *myGpuMalloc(size_t bytes) {
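// Round the request up to the next multiple of 2^N bytes and bump the arena pointer;
// allocations are never freed individually.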
bytes = ((bytes + (1 << N) - 1) >> N) << N;
void *res = gpuMallocAddr;
gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes);
if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE)
fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n");
return res;
}
template <typename T>
__global__ void arrayUpdate(T *data, int index, T value) {
data[index] = value;
}
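// Note: arrayFill has no bounds check, so the launch must cover exactly `size` threads;
// arrayFill_greg below is the grid-stride variant that is safe for any launch configuration.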
__global__ void arrayFill(float *data, float value) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
data[tid] = value;
}
__global__ void arrayFill_greg(float* data, float value, int size) {
int stride = gridDim.x * blockDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < size; i += stride) data[i] = value;
}
__global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < size; i += stride) {
out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? max_val : in[i]);
}
}
__global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < size; i += stride) {
if (inplace) {
if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0;
} else {
if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i];
}
}
}
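// nllLoss: one thread per sample selects the negated score of that sample's target class;
// the input x is expected to already hold log-probabilities (e.g. the output of a log-softmax).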
__global__ void nllLoss(float *x, int x_stride, float *y, int* target) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
y[tid] = -1 * x[offset];
}
__global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
xGrad[offset] += -1 * yGrad[tid];
}
// only for 4D tensor in and 3D tensor out
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement,
float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
}
//following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49
static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3,
int outputStride0, int outputStride1, int outputStride2, int outputStride3,
const int dimSize, const int concatDim, int linearIndex) {
int offset = 0;
int curDimSize = 3 == concatDim ? dimSize : outputSize3;
int nextDimIndex = linearIndex / curDimSize;
int curDimIndex = linearIndex - curDimSize * nextDimIndex;
int curDimOffset = curDimIndex * outputStride3;
offset += curDimOffset;
linearIndex = nextDimIndex;
curDimSize = 2 == concatDim ? dimSize : outputSize2;
nextDimIndex = linearIndex / curDimSize;
curDimIndex = linearIndex - curDimSize * nextDimIndex;
curDimOffset = curDimIndex * outputStride2;
offset += curDimOffset;
linearIndex = nextDimIndex;
curDimSize = 1 == concatDim ? dimSize : outputSize1;
nextDimIndex = linearIndex / curDimSize;
curDimIndex = linearIndex - curDimSize * nextDimIndex;
curDimOffset = curDimIndex * outputStride1;
offset += curDimOffset;
linearIndex = nextDimIndex;
return offset + linearIndex * outputStride0;
// for (int i = 3; i >= 1; i--) {
// int curDimSize = i == concatDim ? dimSize : outputSize[i];
// int nextDimIndex = linearIndex / curDimSize;
// int curDimIndex = linearIndex - curDimSize * nextDimIndex;
// int curDimOffset = curDimIndex * outputStride[i];
// offset += curDimOffset;
// linearIndex = nextDimIndex;
// }
// return offset + linearIndex * outputStride[0];
}
// TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1
__global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1,
float* in2, int dimSize2, int nElement2,
float* out, int concatDim,
int outSize0, int outSize1, int outSize2, int outSize3,
int outStride0, int outStride1, int outStride2, int outStride3) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nElement = blockIdx.y == 0 ? nElement1 : nElement2;
if (tid >= nElement) return;
float* data = blockIdx.y == 0 ? in1 : in2;
int offset = blockIdx.y == 0 ? 0 : dimSize1;
int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2;
int dataOffset = offset * outStride1;
int stride = gridDim.x * blockDim.x;
while (tid < nElement) {
int elementOffset = compute(outSize0, outSize1, outSize2, outSize3,
outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid);
out[dataOffset + elementOffset] = data[tid];
tid += stride;
}
}
// TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1
__global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1,
float* in2, int dimSize2, int nElement2,
float* out, int concatDim,
int outSize0, int outSize1, int outSize2, int outSize3,
int outStride0, int outStride1, int outStride2, int outStride3) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nElement = blockIdx.y == 0 ? nElement1 : nElement2;
if (tid >= nElement) return;
float* data = blockIdx.y == 0 ? in1 : in2;
int offset = blockIdx.y == 0 ? 0 : dimSize1;
int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2;
int dataOffset = offset * outStride1;
int stride = gridDim.x * blockDim.x;
while (tid < nElement) {
int elementOffset = compute(outSize0, outSize1, outSize2, outSize3,
outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid);
data[tid] += out[dataOffset + elementOffset];
tid += stride;
}
}
__global__ void concat2D_1D_loop(float* in1, float* in2, float* out, int sizeLow, int sizeHigh, int sizeDim1, int sizeDim2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= sizeLow) return;
if (blockIdx.y < sizeHigh) { // the first input
int index_out = tid + blockIdx.y * sizeLow * (sizeDim1 + sizeDim2);
int index_in1 = tid + blockIdx.y * sizeLow * sizeDim1;
for (int i = 0; i < sizeDim1; i++) {
out[index_out] = in1[index_in1];
index_out += sizeLow; index_in1 += sizeLow;
}
} else { // the second input
int index_out = tid + (blockIdx.y - sizeHigh) * sizeLow * (sizeDim1 + sizeDim2) + sizeLow * sizeDim1;
int index_in2 = tid + (blockIdx.y - sizeHigh) * sizeLow * sizeDim2;
for (int i = 0; i < sizeDim2; i++) {
out[index_out] = in2[index_in2];
index_out += sizeLow; index_in2 += sizeLow;
}
}
}
__global__ void concat2D_1D_loop_grad(float* in1, float* in2, float* out, int sizeLow, int sizeHigh, int sizeDim1, int sizeDim2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= sizeLow) return;
if (blockIdx.y < sizeHigh) { // the first input
int index_out = tid + blockIdx.y * sizeLow * (sizeDim1 + sizeDim2);
int index_in1 = tid + blockIdx.y * sizeLow * sizeDim1;
for (int i = 0; i < sizeDim1; i++) {
in1[index_in1] += out[index_out];
index_out += sizeLow; index_in1 += sizeLow;
}
} else { // the second input
int index_out = tid + (blockIdx.y - sizeHigh) * sizeLow * (sizeDim1 + sizeDim2) + sizeLow * sizeDim1;
int index_in2 = tid + (blockIdx.y - sizeHigh) * sizeLow * sizeDim2;
for (int i = 0; i < sizeDim2; i++) {
in2[index_in2] += out[index_out];
index_out += sizeLow; index_in2 += sizeLow;
}
}
}
__global__ void concat2D_1D(float* in1, float* in2, float* out, int dim2, int bound) {
int tid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (blockIdx.x < bound * dim2) {
int subid = blockIdx.y * bound * dim2 * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
out[tid] = in1[subid];
} else {
int subid = blockIdx.y * (gridDim.x - bound * dim2) * blockDim.x + (blockIdx.x - bound * dim2) * blockDim.x + threadIdx.x;
out[tid] = in2[subid];
}
}
__global__ void concat2D_1D_grad(float* in1, float* in2, float* out, int dim2, int bound) {
int tid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
if (blockIdx.x < bound * dim2) {
int subid = blockIdx.y * bound * dim2 * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
in1[subid] += out[tid];
} else {
int subid = blockIdx.y * (gridDim.x - bound * dim2) * blockDim.x + (blockIdx.x - bound * dim2) * blockDim.x + threadIdx.x;
in2[subid] += out[tid];
}
}
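// Adagrad step with gradient clipping: d is clipped to [-clip, clip], its square is accumulated
// into m, x is updated by lr * d / sqrt(m + 1e-8), and the gradient buffer is zeroed.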
__global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
if (d[tid] > clip) d[tid] = clip;
if (d[tid] < -clip) d[tid] = -clip;
m[tid] += d[tid] * d[tid];
x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001);
d[tid] = 0;
}
}
__global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] * in2[tid];
}
__global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] += in1[tid] * in2[tid];
}
__global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] + in2[tid];
}
__global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] - in2[tid];
}
__global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in1[tid] / in2[tid];
}
__global__ void elementwise_1D_1D_exp(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = exp(in[tid]);
}
__global__ void elementwise_1D_1D_log(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = log(in[tid]);
}
__global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = sqrt(in[tid]);
}
__global__ void elementwise_1D_1D_square(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) out[tid] = in[tid] * in[tid];
}
__global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] * out_x[tid];
}
__global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] / in_x[tid];
}
__global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2;
}
__global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid];
}
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh
// Result of div/mod operation stored together.
template <typename Value>
struct DivMod {
Value div, mod;
__host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { }
};
// Base case: we only have an implementation for uint32_t for now. For
// everything else, we use plain division.
template <typename Value>
struct IntDivider {
IntDivider() { } // Dummy constructor for arrays.
IntDivider(Value d) : divisor(d) { }
__host__ __device__ inline Value div(Value n) const { return n / divisor; }
__host__ __device__ inline Value mod(Value n) const { return n % divisor; }
__host__ __device__ inline DivMod<Value> divmod(Value n) const {
return DivMod<Value>(n / divisor, n % divisor);
}
Value divisor;
};
// Implement fast integer division.
template <>
struct IntDivider<unsigned int> {
static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
IntDivider() { } // Dummy constructor for arrays.
IntDivider(unsigned int d) : divisor(d) {
assert(divisor >= 1 && divisor <= INT32_MAX);
// TODO: gcc/clang has __builtin_clz() but it's not portable.
for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
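// m1 is chosen so that, for every 32-bit n, adding n to the high 32 bits of n * m1 and shifting
// right by `shift` yields n / divisor (magic-number division, as in Hacker's Delight).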
uint64_t one = 1;
uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
m1 = magic;
assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits.
}
__host__ __device__ inline unsigned int div(unsigned int n) const {
#ifdef __CUDA_ARCH__
// 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and
// 'm1'.
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
#else
// Using uint64_t so that the addition does not overflow.
uint64_t t = ((uint64_t) n * m1) >> 32;
return (t + n) >> shift;
#endif
}
__host__ __device__ inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
__host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor; // d above.
unsigned int m1; // Magic number: m' above.
unsigned int shift; // Shift amounts.
};
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh
/// OffsetCalculator calculates the offset in bytes of a linear index for NARGS
/// operands that share the same shape, but may have different strides.
template <int NARGS>
struct OffsetCalculator {
static constexpr int MAX_DIMS = 25;
// The offset for each argument (in bytes). Wrapper around fixed-size array.
struct offsets_t {
__host__ __device__ uint32_t& operator[](int idx) {
return values[idx];
}
uint32_t values[NARGS];
};
// OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) {
OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) {
for (int i = 0; i < MAX_DIMS; ++i) {
if (i < dims) {
sizes_[i] = IntDivider<uint32_t>(sizes[i]);
} else {
sizes_[i] = IntDivider<uint32_t>(1);
}
for (int arg = 0; arg < NARGS; arg++) {
strides_[i][arg] = i < dims ? strides[arg][i] : 0;
}
}
}
__host__ __device__ offsets_t get(uint32_t linear_idx) const {
offsets_t offsets;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] = 0;
}
#pragma unroll
for (int dim = 0; dim < MAX_DIMS; ++dim) {
if (dim == dims) {
break;
}
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] += divmod.mod * strides_[dim][arg];
}
}
return offsets;
}
void print() {
for (auto i = 1; i < 128; i++) {
auto offsets = get(i);
printf("offsets[%d]: ", i);
for (auto arg = 0; arg < NARGS; arg++) {
printf("%d ", offsets[arg]);
}
printf("\n");
}
}
int dims;
IntDivider<uint32_t> sizes_[MAX_DIMS];
uint32_t strides_[MAX_DIMS][NARGS];
};
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh
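// Each block of elementwise_kernel covers nt * vt elements: nt threads, each handling up to vt
// elements spaced nt apart.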
template<int nt, int vt, typename func_t>
__launch_bounds__(nt, 4)
__global__ void elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
int idx = nv * blockIdx.x + tid;
#pragma unroll
for (int i = 0; i < vt; i++) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
elementwise_kernel<nt, vt, func_t><<<grid, block, 0>>>(N, f);
}
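// Launch geometry note: each block of nt threads covers nt * vt consecutive
// linear indices, and thread `tid` handles indices tid, tid + nt, ...,
// tid + (vt-1) * nt within its block's tile. With the <128, 4> instantiation
// used below, one block therefore covers 512 output elements and the grid is
// sized as ceil(N / 512).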
template<typename func_t>
void gpu_unary_kernel(float *res, float *x,
int32_t resRank, const int32_t resScalarCount,
const int32_t* resShape, const int32_t* const* strides,
const func_t& f) {
OffsetCalculator<2> calc(resRank, resShape, strides);
launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
auto offsets = calc.get(idx);
float* out = &res[offsets[0]];
float* in = &x[offsets[1]];
*out = f(*in);
});
}
template<typename func_t>
void gpu_binary_kernel(float *res, float *x, float *y,
int32_t resRank, const int32_t resScalarCount,
const int32_t* resShape, const int32_t* const* strides,
const func_t& f) {
OffsetCalculator<3> calc(resRank, resShape, strides);
launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
auto offsets = calc.get(idx);
float* out = &res[offsets[0]];
float* in1 = &x[offsets[1]];
float* in2 = &y[offsets[2]];
*out = f(*in1, *in2);
});
}
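// Minimal usage sketch for the two wrappers above (illustrative only; the
// helper below and its arguments are hypothetical and are not used by the
// generated code). It adds two contiguous float GPU buffers of length n
// (assuming n >= 1, since IntDivider rejects a zero divisor) element-wise
// into res. Strides are in bytes and dim 0 is the fastest-varying dimension.
void gpuBinaryKernelExample(float* res, float* x, float* y, int32_t n) {
  int32_t shape[1] = {n};
  int32_t contiguous[1] = {(int32_t)sizeof(float)};
  const int32_t* strides[3] = {contiguous, contiguous, contiguous};
  gpu_binary_kernel(res, x, y, /*resRank=*/1, /*resScalarCount=*/n,
                    shape, strides,
                    []__device__(float a, float b) { return a + b; });
}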
#define CUDNN_CALL(f) { \
cudnnStatus_t stat = (f); \
if (stat != CUDNN_STATUS_SUCCESS) { \
fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \
stat, __FILE__, __LINE__); \
exit(stat); \
} \
}
void Snippet(char *);
std::random_device rd{};
std::mt19937 gen{rd()};
std::normal_distribution<> d{0, 0.01};
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: query <filename>\n");
return 0;
}
Snippet(argv[1]);
return 0;
}
/*****************************************
Emitting C Generated Code
*******************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
void Snippet(char* x0) {
// Backend setup.
cublasHandle_t cublasHandle;
CUBLAS_CALL(cublasCreate(&cublasHandle));
CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE));
CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE));
gpuMallocAddr = gpuMallocBase;
cudnnHandle_t cudnnHandle;
CUDNN_CALL(cudnnCreate(&cudnnHandle));
// Tensor 'toGPU' invocation.
float* x275 = (float*)myGpuMalloc(262144 * sizeof(float));
int32_t x4 = open("/u/data/u99/wang603/TiarkMlEnv/Lantern/src/out/PLDI19evaluation/resnet50/resnet50.onnx.bin",0);
int32_t x5 = fsize(x4);
float* x6 = (float*)mmap(0, x5, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x4, 0);
float* x7 = x6+5205440;
CUDA_CALL(cudaMemcpy(x275, x7, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x278 = (float*)myGpuMalloc(256 * sizeof(float));
float* x8 = x6+148672;
CUDA_CALL(cudaMemcpy(x278, x8, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x281 = (float*)myGpuMalloc(128 * sizeof(float));
float* x9 = x6+816064;
CUDA_CALL(cudaMemcpy(x281, x9, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x284 = (float*)myGpuMalloc(128 * sizeof(float));
float* x10 = x6+950080;
CUDA_CALL(cudaMemcpy(x284, x10, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x287 = (float*)myGpuMalloc(64 * sizeof(float));
float* x11 = x6+94784;
CUDA_CALL(cudaMemcpy(x287, x11, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x290 = (float*)myGpuMalloc(32768 * sizeof(float));
float* x12 = x6+220608;
CUDA_CALL(cudaMemcpy(x290, x12, 32768 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x293 = (float*)myGpuMalloc(512 * sizeof(float));
float* x13 = x6+22495680;
CUDA_CALL(cudaMemcpy(x293, x13, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x296 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x14 = x6+2964928;
CUDA_CALL(cudaMemcpy(x296, x14, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x299 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x15 = x6+4348352;
CUDA_CALL(cudaMemcpy(x299, x15, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x302 = (float*)myGpuMalloc(512 * sizeof(float));
float* x16 = x6+20133312;
CUDA_CALL(cudaMemcpy(x302, x16, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x305 = (float*)myGpuMalloc(256 * sizeof(float));
float* x17 = x6+2169536;
CUDA_CALL(cudaMemcpy(x305, x17, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x308 = (float*)myGpuMalloc(128 * sizeof(float));
float* x18 = x6+668224;
CUDA_CALL(cudaMemcpy(x308, x18, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x311 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x19 = x6+2432448;
CUDA_CALL(cudaMemcpy(x311, x19, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x314 = (float*)myGpuMalloc(512 * sizeof(float));
float* x20 = x6+1446336;
CUDA_CALL(cudaMemcpy(x314, x20, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x317 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x21 = x6+4081088;
CUDA_CALL(cudaMemcpy(x317, x21, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x320 = (float*)myGpuMalloc(256 * sizeof(float));
float* x22 = x6+1578688;
CUDA_CALL(cudaMemcpy(x320, x22, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x323 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x23 = x6+6325696;
CUDA_CALL(cudaMemcpy(x323, x23, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x326 = (float*)myGpuMalloc(512 * sizeof(float));
float* x24 = x6+602048;
CUDA_CALL(cudaMemcpy(x326, x24, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x329 = (float*)myGpuMalloc(64 * sizeof(float));
float* x25 = x6+165888;
CUDA_CALL(cudaMemcpy(x329, x25, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x332 = (float*)myGpuMalloc(512 * sizeof(float));
float* x26 = x6+1164736;
CUDA_CALL(cudaMemcpy(x332, x26, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x335 = (float*)myGpuMalloc(64 * sizeof(float));
float* x27 = x6+6080;
CUDA_CALL(cudaMemcpy(x335, x27, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x338 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x28 = x6+253888;
CUDA_CALL(cudaMemcpy(x338, x28, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x341 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x29 = x6+20135360;
CUDA_CALL(cudaMemcpy(x341, x29, 2359296 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x344 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x30 = x6+2960832;
CUDA_CALL(cudaMemcpy(x344, x30, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x347 = (float*)myGpuMalloc(256 * sizeof(float));
float* x31 = x6+3227072;
CUDA_CALL(cudaMemcpy(x347, x31, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x350 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x32 = x6+3228096;
CUDA_CALL(cudaMemcpy(x350, x32, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x353 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x33 = x6+43456;
CUDA_CALL(cudaMemcpy(x353, x33, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x356 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x34 = x6+22496704;
CUDA_CALL(cudaMemcpy(x356, x34, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x359 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x35 = x6+9092544;
CUDA_CALL(cudaMemcpy(x359, x35, 2359296 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x362 = (float*)myGpuMalloc(128 * sizeof(float));
float* x36 = x6+816320;
CUDA_CALL(cudaMemcpy(x362, x36, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x365 = (float*)myGpuMalloc(256 * sizeof(float));
float* x37 = x6+60608;
CUDA_CALL(cudaMemcpy(x365, x37, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x368 = (float*)myGpuMalloc(256 * sizeof(float));
float* x38 = x6+219584;
CUDA_CALL(cudaMemcpy(x368, x38, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x371 = (float*)myGpuMalloc(128 * sizeof(float));
float* x39 = x6+1379392;
CUDA_CALL(cudaMemcpy(x371, x39, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x374 = (float*)myGpuMalloc(128 * sizeof(float));
float* x40 = x6+1231296;
CUDA_CALL(cudaMemcpy(x374, x40, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x377 = (float*)myGpuMalloc(64 * sizeof(float));
float* x41 = x6+1856;
CUDA_CALL(cudaMemcpy(x377, x41, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x380 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x42 = x6+1098176;
CUDA_CALL(cudaMemcpy(x380, x42, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x383 = (float*)myGpuMalloc(512 * sizeof(float));
float* x43 = x6+601536;
CUDA_CALL(cudaMemcpy(x383, x43, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x386 = (float*)myGpuMalloc(128 * sizeof(float));
float* x44 = x6+401728;
CUDA_CALL(cudaMemcpy(x386, x44, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x389 = (float*)myGpuMalloc(64 * sizeof(float));
float* x45 = x6+131904;
CUDA_CALL(cudaMemcpy(x389, x45, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x392 = (float*)myGpuMalloc(128 * sizeof(float));
float* x46 = x6+949696;
CUDA_CALL(cudaMemcpy(x392, x46, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x395 = (float*)myGpuMalloc(512 * sizeof(float));
float* x47 = x6+15664576;
CUDA_CALL(cudaMemcpy(x395, x47, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x398 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x48 = x6+18027968;
CUDA_CALL(cudaMemcpy(x398, x48, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x401 = (float*)myGpuMalloc(10 * sizeof(float));
float* x49 = x6+23573952;
CUDA_CALL(cudaMemcpy(x401, x49, 10 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x404 = (float*)myGpuMalloc(64 * sizeof(float));
float* x50 = x6+43264;
CUDA_CALL(cudaMemcpy(x404, x50, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x407 = (float*)myGpuMalloc(512 * sizeof(float));
float* x51 = x6+11453376;
CUDA_CALL(cudaMemcpy(x407, x51, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x410 = (float*)myGpuMalloc(64 * sizeof(float));
float* x52 = x6+6272;
CUDA_CALL(cudaMemcpy(x410, x52, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x413 = (float*)myGpuMalloc(512 * sizeof(float));
float* x53 = x6+882112;
CUDA_CALL(cudaMemcpy(x413, x53, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x416 = (float*)myGpuMalloc(64 * sizeof(float));
float* x54 = x6+6144;
CUDA_CALL(cudaMemcpy(x416, x54, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x419 = (float*)myGpuMalloc(512 * sizeof(float));
float* x55 = x6+1445824;
CUDA_CALL(cudaMemcpy(x419, x55, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x422 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x56 = x6+1379776;
CUDA_CALL(cudaMemcpy(x422, x56, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x425 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x57 = x6+3818944;
CUDA_CALL(cudaMemcpy(x425, x57, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x428 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x58 = x6+5202368;
CUDA_CALL(cudaMemcpy(x428, x58, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x431 = (float*)myGpuMalloc(256 * sizeof(float));
float* x59 = x6+148416;
CUDA_CALL(cudaMemcpy(x431, x59, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x434 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x60 = x6+7441856;
CUDA_CALL(cudaMemcpy(x434, x60, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x437 = (float*)myGpuMalloc(64 * sizeof(float));
float* x61 = x6+94720;
CUDA_CALL(cudaMemcpy(x437, x61, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x440 = (float*)myGpuMalloc(128 * sizeof(float));
float* x62 = x6+1097792;
CUDA_CALL(cudaMemcpy(x440, x62, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x443 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x63 = x6+12504512;
CUDA_CALL(cudaMemcpy(x443, x63, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x446 = (float*)myGpuMalloc(256 * sizeof(float));
float* x64 = x6+4938944;
CUDA_CALL(cudaMemcpy(x446, x64, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x449 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x65 = x6+14611904;
CUDA_CALL(cudaMemcpy(x449, x65, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x452 = (float*)myGpuMalloc(512 * sizeof(float));
float* x66 = x6+15666112;
CUDA_CALL(cudaMemcpy(x452, x66, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x455 = (float*)myGpuMalloc(512 * sizeof(float));
float* x67 = x6+18026432;
CUDA_CALL(cudaMemcpy(x455, x67, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x458 = (float*)myGpuMalloc(512 * sizeof(float));
float* x68 = x6+9091520;
CUDA_CALL(cudaMemcpy(x458, x68, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x461 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x69 = x6+19080640;
CUDA_CALL(cudaMemcpy(x461, x69, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x464 = (float*)myGpuMalloc(256 * sizeof(float));
float* x70 = x6+6588608;
CUDA_CALL(cudaMemcpy(x464, x70, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x467 = (float*)myGpuMalloc(256 * sizeof(float));
float* x71 = x6+8299456;
CUDA_CALL(cudaMemcpy(x467, x71, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x470 = (float*)myGpuMalloc(256 * sizeof(float));
float* x72 = x6+60352;
CUDA_CALL(cudaMemcpy(x470, x72, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x473 = (float*)myGpuMalloc(64 * sizeof(float));
float* x73 = x6+202944;
CUDA_CALL(cudaMemcpy(x473, x73, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x476 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x74 = x6+166080;
CUDA_CALL(cudaMemcpy(x476, x74, 36864 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x479 = (float*)myGpuMalloc(256 * sizeof(float));
float* x75 = x6+6058432;
CUDA_CALL(cudaMemcpy(x479, x75, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x482 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x76 = x6+2436544;
CUDA_CALL(cudaMemcpy(x482, x76, 524288 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x485 = (float*)myGpuMalloc(256 * sizeof(float));
float* x77 = x6+77248;
CUDA_CALL(cudaMemcpy(x485, x77, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x488 = (float*)myGpuMalloc(256 * sizeof(float));
float* x78 = x6+6587840;
CUDA_CALL(cudaMemcpy(x488, x78, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x491 = (float*)myGpuMalloc(512 * sizeof(float));
float* x79 = x6+20133824;
CUDA_CALL(cudaMemcpy(x491, x79, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x494 = (float*)myGpuMalloc(128 * sizeof(float));
float* x80 = x6+1379264;
CUDA_CALL(cudaMemcpy(x494, x80, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x497 = (float*)myGpuMalloc(256 * sizeof(float));
float* x81 = x6+7708608;
CUDA_CALL(cudaMemcpy(x497, x81, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x500 = (float*)myGpuMalloc(64 * sizeof(float));
float* x82 = x6+165824;
CUDA_CALL(cudaMemcpy(x500, x82, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x503 = (float*)myGpuMalloc(512 * sizeof(float));
float* x83 = x6+1164224;
CUDA_CALL(cudaMemcpy(x503, x83, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x506 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x84 = x6+94912;
CUDA_CALL(cudaMemcpy(x506, x84, 36864 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x509 = (float*)myGpuMalloc(128 * sizeof(float));
float* x85 = x6+253376;
CUDA_CALL(cudaMemcpy(x509, x85, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x512 = (float*)myGpuMalloc(256 * sizeof(float));
float* x86 = x6+7708096;
CUDA_CALL(cudaMemcpy(x512, x86, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x515 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x87 = x6+2962880;
CUDA_CALL(cudaMemcpy(x515, x87, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x518 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x88 = x6+203200;
CUDA_CALL(cudaMemcpy(x518, x88, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x521 = (float*)myGpuMalloc(512 * sizeof(float));
float* x89 = x6+883648;
CUDA_CALL(cudaMemcpy(x521, x89, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x524 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x90 = x6+6059456;
CUDA_CALL(cudaMemcpy(x524, x90, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x527 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x91 = x6+6336;
CUDA_CALL(cudaMemcpy(x527, x91, 36864 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x530 = (float*)myGpuMalloc(256 * sizeof(float));
float* x92 = x6+148928;
CUDA_CALL(cudaMemcpy(x530, x92, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x533 = (float*)myGpuMalloc(256 * sizeof(float));
float* x93 = x6+5467584;
CUDA_CALL(cudaMemcpy(x533, x93, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x536 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x94 = x6+8563136;
CUDA_CALL(cudaMemcpy(x536, x94, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x539 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x95 = x6+19076544;
CUDA_CALL(cudaMemcpy(x539, x95, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x542 = (float*)myGpuMalloc(128 * sizeof(float));
float* x96 = x6+816192;
CUDA_CALL(cudaMemcpy(x542, x96, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x545 = (float*)myGpuMalloc(256 * sizeof(float));
float* x97 = x6+3818176;
CUDA_CALL(cudaMemcpy(x545, x97, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x548 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x98 = x6+8299968;
CUDA_CALL(cudaMemcpy(x548, x98, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x551 = (float*)myGpuMalloc(256 * sizeof(float));
float* x99 = x6+5468352;
CUDA_CALL(cudaMemcpy(x551, x99, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x554 = (float*)myGpuMalloc(256 * sizeof(float));
float* x100 = x6+2170048;
CUDA_CALL(cudaMemcpy(x554, x100, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x557 = (float*)myGpuMalloc(128 * sizeof(float));
float* x101 = x6+668352;
CUDA_CALL(cudaMemcpy(x557, x101, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x560 = (float*)myGpuMalloc(512 * sizeof(float));
float* x102 = x6+468928;
CUDA_CALL(cudaMemcpy(x560, x102, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x563 = (float*)myGpuMalloc(64 * sizeof(float));
float* x103 = x6+94848;
CUDA_CALL(cudaMemcpy(x563, x103, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x566 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x104 = x6+23545280;
CUDA_CALL(cudaMemcpy(x566, x104, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x569 = (float*)myGpuMalloc(256 * sizeof(float));
float* x105 = x6+7179456;
CUDA_CALL(cudaMemcpy(x569, x105, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x572 = (float*)myGpuMalloc(64 * sizeof(float));
float* x106 = x6+43328;
CUDA_CALL(cudaMemcpy(x572, x106, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x575 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x107 = x6+401856;
CUDA_CALL(cudaMemcpy(x575, x107, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x578 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x108 = x6+14609856;
CUDA_CALL(cudaMemcpy(x578, x108, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x581 = (float*)myGpuMalloc(256 * sizeof(float));
float* x109 = x6+2169280;
CUDA_CALL(cudaMemcpy(x581, x109, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x584 = (float*)myGpuMalloc(256 * sizeof(float));
float* x110 = x6+7178944;
CUDA_CALL(cudaMemcpy(x584, x110, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x587 = (float*)myGpuMalloc(64 * sizeof(float));
float* x111 = x6+1920;
CUDA_CALL(cudaMemcpy(x587, x111, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x590 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x112 = x6+816576;
CUDA_CALL(cudaMemcpy(x590, x112, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x593 = (float*)myGpuMalloc(128 * sizeof(float));
float* x113 = x6+949952;
CUDA_CALL(cudaMemcpy(x593, x113, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x596 = (float*)myGpuMalloc(512 * sizeof(float));
float* x114 = x6+11452864;
CUDA_CALL(cudaMemcpy(x596, x114, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x599 = (float*)myGpuMalloc(64 * sizeof(float));
float* x115 = x6+6208;
CUDA_CALL(cudaMemcpy(x599, x115, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x602 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x116 = x6+12506560;
CUDA_CALL(cudaMemcpy(x602, x116, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x605 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x117 = x6+4939200;
CUDA_CALL(cudaMemcpy(x605, x117, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x608 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x118 = x6+2433472;
CUDA_CALL(cudaMemcpy(x608, x118, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x611 = (float*)myGpuMalloc(64 * sizeof(float));
float* x119 = x6+203136;
CUDA_CALL(cudaMemcpy(x611, x119, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x614 = (float*)myGpuMalloc(512 * sizeof(float));
float* x120 = x6+601024;
CUDA_CALL(cudaMemcpy(x614, x120, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x617 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x121 = x6+7442880;
CUDA_CALL(cudaMemcpy(x617, x121, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x620 = (float*)myGpuMalloc(512 * sizeof(float));
float* x122 = x6+9092032;
CUDA_CALL(cudaMemcpy(x620, x122, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x623 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x123 = x6+8564160;
CUDA_CALL(cudaMemcpy(x623, x123, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x626 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x124 = x6+23551424;
CUDA_CALL(cudaMemcpy(x626, x124, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x629 = (float*)myGpuMalloc(256 * sizeof(float));
float* x125 = x6+4938688;
CUDA_CALL(cudaMemcpy(x629, x125, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x632 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x126 = x6+14613952;
CUDA_CALL(cudaMemcpy(x632, x126, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x635 = (float*)myGpuMalloc(256 * sizeof(float));
float* x127 = x6+60096;
CUDA_CALL(cudaMemcpy(x635, x127, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x638 = (float*)myGpuMalloc(128 * sizeof(float));
float* x128 = x6+1097664;
CUDA_CALL(cudaMemcpy(x638, x128, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x641 = (float*)myGpuMalloc(128 * sizeof(float));
float* x129 = x6+401600;
CUDA_CALL(cudaMemcpy(x641, x129, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x644 = (float*)myGpuMalloc(256 * sizeof(float));
float* x130 = x6+4347328;
CUDA_CALL(cudaMemcpy(x644, x130, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x647 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x131 = x6+132032;
CUDA_CALL(cudaMemcpy(x647, x131, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x650 = (float*)myGpuMalloc(256 * sizeof(float));
float* x132 = x6+1578944;
CUDA_CALL(cudaMemcpy(x650, x132, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x653 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x133 = x6+1165760;
CUDA_CALL(cudaMemcpy(x653, x133, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x656 = (float*)myGpuMalloc(256 * sizeof(float));
float* x134 = x6+220352;
CUDA_CALL(cudaMemcpy(x656, x134, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x659 = (float*)myGpuMalloc(128 * sizeof(float));
float* x135 = x6+253760;
CUDA_CALL(cudaMemcpy(x659, x135, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x662 = (float*)myGpuMalloc(64 * sizeof(float));
float* x136 = x6+203008;
CUDA_CALL(cudaMemcpy(x662, x136, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x665 = (float*)myGpuMalloc(256 * sizeof(float));
float* x137 = x6+6058688;
CUDA_CALL(cudaMemcpy(x665, x137, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x668 = (float*)myGpuMalloc(512 * sizeof(float));
float* x138 = x6+15665088;
CUDA_CALL(cudaMemcpy(x668, x138, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x671 = (float*)myGpuMalloc(512 * sizeof(float));
float* x139 = x6+18026944;
CUDA_CALL(cudaMemcpy(x671, x139, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x674 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x140 = x6+8566208;
CUDA_CALL(cudaMemcpy(x674, x140, 524288 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x677 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x141 = x6+5203392;
CUDA_CALL(cudaMemcpy(x677, x141, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x680 = (float*)myGpuMalloc(256 * sizeof(float));
float* x142 = x6+8298944;
CUDA_CALL(cudaMemcpy(x680, x142, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x683 = (float*)myGpuMalloc(64 * sizeof(float));
float* x143 = x6+94656;
CUDA_CALL(cudaMemcpy(x683, x143, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x686 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x144 = x6+4084160;
CUDA_CALL(cudaMemcpy(x686, x144, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x689 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x145 = x6+19078592;
CUDA_CALL(cudaMemcpy(x689, x145, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x692 = (float*)myGpuMalloc(512 * sizeof(float));
float* x146 = x6+467392;
CUDA_CALL(cudaMemcpy(x692, x146, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x695 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x147 = x6+6322624;
CUDA_CALL(cudaMemcpy(x695, x147, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x698 = (float*)myGpuMalloc(512 * sizeof(float));
float* x148 = x6+883136;
CUDA_CALL(cudaMemcpy(x698, x148, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x701 = (float*)myGpuMalloc(128 * sizeof(float));
float* x149 = x6+1379648;
CUDA_CALL(cudaMemcpy(x701, x149, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x704 = (float*)myGpuMalloc(512 * sizeof(float));
float* x150 = x6+468416;
CUDA_CALL(cudaMemcpy(x704, x150, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x707 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x151 = x6+149440;
CUDA_CALL(cudaMemcpy(x707, x151, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x710 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x152 = x6+7445952;
CUDA_CALL(cudaMemcpy(x710, x152, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x713 = (float*)myGpuMalloc(1728 * sizeof(float));
float* x153 = x6+0;
CUDA_CALL(cudaMemcpy(x713, x153, 1728 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x716 = (float*)myGpuMalloc(64 * sizeof(float));
float* x154 = x6+131840;
CUDA_CALL(cudaMemcpy(x716, x154, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x719 = (float*)myGpuMalloc(512 * sizeof(float));
float* x155 = x6+15665600;
CUDA_CALL(cudaMemcpy(x719, x155, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x722 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x156 = x6+15666624;
CUDA_CALL(cudaMemcpy(x722, x156, 2359296 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x725 = (float*)myGpuMalloc(512 * sizeof(float));
float* x157 = x6+1445312;
CUDA_CALL(cudaMemcpy(x725, x157, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x728 = (float*)myGpuMalloc(256 * sizeof(float));
float* x158 = x6+3227840;
CUDA_CALL(cudaMemcpy(x728, x158, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x731 = (float*)myGpuMalloc(64 * sizeof(float));
float* x159 = x6+43392;
CUDA_CALL(cudaMemcpy(x731, x159, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x734 = (float*)myGpuMalloc(512 * sizeof(float));
float* x160 = x6+11452352;
CUDA_CALL(cudaMemcpy(x734, x160, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x737 = (float*)myGpuMalloc(512 * sizeof(float));
float* x161 = x6+18025920;
CUDA_CALL(cudaMemcpy(x737, x161, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x740 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x162 = x6+6324672;
CUDA_CALL(cudaMemcpy(x740, x162, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x743 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x163 = x6+60864;
CUDA_CALL(cudaMemcpy(x743, x163, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x746 = (float*)myGpuMalloc(256 * sizeof(float));
float* x164 = x6+5468096;
CUDA_CALL(cudaMemcpy(x746, x164, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x749 = (float*)myGpuMalloc(64 * sizeof(float));
float* x165 = x6+43200;
CUDA_CALL(cudaMemcpy(x749, x165, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x752 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x166 = x6+1231808;
CUDA_CALL(cudaMemcpy(x752, x166, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x755 = (float*)myGpuMalloc(256 * sizeof(float));
float* x167 = x6+149184;
CUDA_CALL(cudaMemcpy(x755, x167, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x758 = (float*)myGpuMalloc(512 * sizeof(float));
float* x168 = x6+1163712;
CUDA_CALL(cudaMemcpy(x758, x168, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x761 = (float*)myGpuMalloc(256 * sizeof(float));
float* x169 = x6+7178688;
CUDA_CALL(cudaMemcpy(x761, x169, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x764 = (float*)myGpuMalloc(512 * sizeof(float));
float* x170 = x6+22495168;
CUDA_CALL(cudaMemcpy(x764, x170, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x767 = (float*)myGpuMalloc(128 * sizeof(float));
float* x171 = x6+949824;
CUDA_CALL(cudaMemcpy(x767, x171, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x770 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x172 = x6+78272;
CUDA_CALL(cudaMemcpy(x770, x172, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x773 = (float*)myGpuMalloc(128 * sizeof(float));
float* x173 = x6+253504;
CUDA_CALL(cudaMemcpy(x773, x173, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x776 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x174 = x6+14607808;
CUDA_CALL(cudaMemcpy(x776, x174, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x779 = (float*)myGpuMalloc(256 * sizeof(float));
float* x175 = x6+4348096;
CUDA_CALL(cudaMemcpy(x779, x175, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x782 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x176 = x6+1579456;
CUDA_CALL(cudaMemcpy(x782, x176, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x785 = (float*)myGpuMalloc(256 * sizeof(float));
float* x177 = x6+7708864;
CUDA_CALL(cudaMemcpy(x785, x177, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x788 = (float*)myGpuMalloc(128 * sizeof(float));
float* x178 = x6+668480;
CUDA_CALL(cudaMemcpy(x788, x178, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x791 = (float*)myGpuMalloc(256 * sizeof(float));
float* x179 = x6+4347840;
CUDA_CALL(cudaMemcpy(x791, x179, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x794 = (float*)myGpuMalloc(64 * sizeof(float));
float* x180 = x6+203072;
CUDA_CALL(cudaMemcpy(x794, x180, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x797 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x181 = x6+1447360;
CUDA_CALL(cudaMemcpy(x797, x181, 131072 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x800 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x182 = x6+23547328;
CUDA_CALL(cudaMemcpy(x800, x182, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x803 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x183 = x6+4083136;
CUDA_CALL(cudaMemcpy(x803, x183, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x806 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x184 = x6+8565184;
CUDA_CALL(cudaMemcpy(x806, x184, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x809 = (float*)myGpuMalloc(256 * sizeof(float));
float* x185 = x6+220096;
CUDA_CALL(cudaMemcpy(x809, x185, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x812 = (float*)myGpuMalloc(256 * sizeof(float));
float* x186 = x6+6588096;
CUDA_CALL(cudaMemcpy(x812, x186, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x815 = (float*)myGpuMalloc(256 * sizeof(float));
float* x187 = x6+6058944;
CUDA_CALL(cudaMemcpy(x815, x187, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x818 = (float*)myGpuMalloc(64 * sizeof(float));
float* x188 = x6+166016;
CUDA_CALL(cudaMemcpy(x818, x188, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x821 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x189 = x6+5204416;
CUDA_CALL(cudaMemcpy(x821, x189, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x824 = (float*)myGpuMalloc(256 * sizeof(float));
float* x190 = x6+8299200;
CUDA_CALL(cudaMemcpy(x824, x190, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x827 = (float*)myGpuMalloc(128 * sizeof(float));
float* x191 = x6+401472;
CUDA_CALL(cudaMemcpy(x827, x191, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x830 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x192 = x6+950208;
CUDA_CALL(cudaMemcpy(x830, x192, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x833 = (float*)myGpuMalloc(256 * sizeof(float));
float* x193 = x6+4938432;
CUDA_CALL(cudaMemcpy(x833, x193, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x836 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x194 = x6+12508608;
CUDA_CALL(cudaMemcpy(x836, x194, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x839 = (float*)myGpuMalloc(512 * sizeof(float));
float* x195 = x6+22494656;
CUDA_CALL(cudaMemcpy(x839, x195, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x842 = (float*)myGpuMalloc(512 * sizeof(float));
float* x196 = x6+18027456;
CUDA_CALL(cudaMemcpy(x842, x196, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x845 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x197 = x6+884160;
CUDA_CALL(cudaMemcpy(x845, x197, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x848 = (float*)myGpuMalloc(256 * sizeof(float));
float* x198 = x6+4347584;
CUDA_CALL(cudaMemcpy(x848, x198, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x851 = (float*)myGpuMalloc(256 * sizeof(float));
float* x199 = x6+1579200;
CUDA_CALL(cudaMemcpy(x851, x199, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x854 = (float*)myGpuMalloc(256 * sizeof(float));
float* x200 = x6+59840;
CUDA_CALL(cudaMemcpy(x854, x200, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x857 = (float*)myGpuMalloc(256 * sizeof(float));
float* x201 = x6+3818432;
CUDA_CALL(cudaMemcpy(x857, x201, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x860 = (float*)myGpuMalloc(512 * sizeof(float));
float* x202 = x6+9090496;
CUDA_CALL(cudaMemcpy(x860, x202, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x863 = (float*)myGpuMalloc(512 * sizeof(float));
float* x203 = x6+22496192;
CUDA_CALL(cudaMemcpy(x863, x203, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x866 = (float*)myGpuMalloc(256 * sizeof(float));
float* x204 = x6+77504;
CUDA_CALL(cudaMemcpy(x866, x204, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x869 = (float*)myGpuMalloc(128 * sizeof(float));
float* x205 = x6+253632;
CUDA_CALL(cudaMemcpy(x869, x205, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x872 = (float*)myGpuMalloc(512 * sizeof(float));
float* x206 = x6+11451840;
CUDA_CALL(cudaMemcpy(x872, x206, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x875 = (float*)myGpuMalloc(64 * sizeof(float));
float* x207 = x6+1728;
CUDA_CALL(cudaMemcpy(x875, x207, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x878 = (float*)myGpuMalloc(512 * sizeof(float));
float* x208 = x6+600512;
CUDA_CALL(cudaMemcpy(x878, x208, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x881 = (float*)myGpuMalloc(64 * sizeof(float));
float* x209 = x6+131776;
CUDA_CALL(cudaMemcpy(x881, x209, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x884 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x210 = x6+7443904;
CUDA_CALL(cudaMemcpy(x884, x210, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x887 = (float*)myGpuMalloc(512 * sizeof(float));
float* x211 = x6+467904;
CUDA_CALL(cudaMemcpy(x887, x211, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x890 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x212 = x6+2963904;
CUDA_CALL(cudaMemcpy(x890, x212, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x893 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x213 = x6+11453888;
CUDA_CALL(cudaMemcpy(x893, x213, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x896 = (float*)myGpuMalloc(512 * sizeof(float));
float* x214 = x6+20134336;
CUDA_CALL(cudaMemcpy(x896, x214, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x899 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x215 = x6+12510656;
CUDA_CALL(cudaMemcpy(x899, x215, 2097152 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x902 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x216 = x6+14616000;
CUDA_CALL(cudaMemcpy(x902, x216, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x905 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x217 = x6+2434496;
CUDA_CALL(cudaMemcpy(x905, x217, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x908 = (float*)myGpuMalloc(128 * sizeof(float));
float* x218 = x6+1097920;
CUDA_CALL(cudaMemcpy(x908, x218, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x911 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x219 = x6+4085184;
CUDA_CALL(cudaMemcpy(x911, x219, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x914 = (float*)myGpuMalloc(256 * sizeof(float));
float* x220 = x6+3227328;
CUDA_CALL(cudaMemcpy(x914, x220, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x917 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x221 = x6+2961856;
CUDA_CALL(cudaMemcpy(x917, x221, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x920 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x222 = x6+7179712;
CUDA_CALL(cudaMemcpy(x920, x222, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x923 = (float*)myGpuMalloc(128 * sizeof(float));
float* x223 = x6+668096;
CUDA_CALL(cudaMemcpy(x923, x223, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x926 = (float*)myGpuMalloc(512 * sizeof(float));
float* x224 = x6+1165248;
CUDA_CALL(cudaMemcpy(x926, x224, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x929 = (float*)myGpuMalloc(512 * sizeof(float));
float* x225 = x6+9091008;
CUDA_CALL(cudaMemcpy(x929, x225, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x932 = (float*)myGpuMalloc(128 * sizeof(float));
float* x226 = x6+816448;
CUDA_CALL(cudaMemcpy(x932, x226, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x935 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x227 = x6+7709120;
CUDA_CALL(cudaMemcpy(x935, x227, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x938 = (float*)myGpuMalloc(20480 * sizeof(float));
float* x228 = x6+23553472;
CUDA_CALL(cudaMemcpy(x938, x228, 20480 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x941 = (float*)myGpuMalloc(256 * sizeof(float));
float* x229 = x6+4938176;
CUDA_CALL(cudaMemcpy(x941, x229, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x944 = (float*)myGpuMalloc(256 * sizeof(float));
float* x230 = x6+2169792;
CUDA_CALL(cudaMemcpy(x944, x230, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x947 = (float*)myGpuMalloc(256 * sizeof(float));
float* x231 = x6+6059200;
CUDA_CALL(cudaMemcpy(x947, x231, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x950 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x232 = x6+6323648;
CUDA_CALL(cudaMemcpy(x950, x232, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x953 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x233 = x6+4082112;
CUDA_CALL(cudaMemcpy(x953, x233, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x956 = (float*)myGpuMalloc(4096 * sizeof(float));
float* x234 = x6+1984;
CUDA_CALL(cudaMemcpy(x956, x234, 4096 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x959 = (float*)myGpuMalloc(512 * sizeof(float));
float* x235 = x6+1446848;
CUDA_CALL(cudaMemcpy(x959, x235, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x962 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x236 = x6+668608;
CUDA_CALL(cudaMemcpy(x962, x236, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x965 = (float*)myGpuMalloc(128 * sizeof(float));
float* x237 = x6+1231552;
CUDA_CALL(cudaMemcpy(x965, x237, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x968 = (float*)myGpuMalloc(256 * sizeof(float));
float* x238 = x6+3818688;
CUDA_CALL(cudaMemcpy(x968, x238, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x971 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x239 = x6+6321600;
CUDA_CALL(cudaMemcpy(x971, x239, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x974 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x240 = x6+12502464;
CUDA_CALL(cudaMemcpy(x974, x240, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x977 = (float*)myGpuMalloc(256 * sizeof(float));
float* x241 = x6+8299712;
CUDA_CALL(cudaMemcpy(x977, x241, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x980 = (float*)myGpuMalloc(256 * sizeof(float));
float* x242 = x6+5467840;
CUDA_CALL(cudaMemcpy(x980, x242, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x983 = (float*)myGpuMalloc(128 * sizeof(float));
float* x243 = x6+1231424;
CUDA_CALL(cudaMemcpy(x983, x243, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x986 = (float*)myGpuMalloc(256 * sizeof(float));
float* x244 = x6+78016;
CUDA_CALL(cudaMemcpy(x986, x244, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x989 = (float*)myGpuMalloc(64 * sizeof(float));
float* x245 = x6+131968;
CUDA_CALL(cudaMemcpy(x989, x245, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x992 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x246 = x6+19082688;
CUDA_CALL(cudaMemcpy(x992, x246, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x995 = (float*)myGpuMalloc(512 * sizeof(float));
float* x247 = x6+882624;
CUDA_CALL(cudaMemcpy(x995, x247, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x998 = (float*)myGpuMalloc(256 * sizeof(float));
float* x248 = x6+219840;
CUDA_CALL(cudaMemcpy(x998, x248, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1001 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x249 = x6+8562112;
CUDA_CALL(cudaMemcpy(x1001, x249, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1004 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x250 = x6+5468608;
CUDA_CALL(cudaMemcpy(x1004, x250, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1007 = (float*)myGpuMalloc(256 * sizeof(float));
float* x251 = x6+7179200;
CUDA_CALL(cudaMemcpy(x1007, x251, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1010 = (float*)myGpuMalloc(64 * sizeof(float));
float* x252 = x6+1792;
CUDA_CALL(cudaMemcpy(x1010, x252, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1013 = (float*)myGpuMalloc(128 * sizeof(float));
float* x253 = x6+401344;
CUDA_CALL(cudaMemcpy(x1013, x253, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1016 = (float*)myGpuMalloc(256 * sizeof(float));
float* x254 = x6+7708352;
CUDA_CALL(cudaMemcpy(x1016, x254, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1019 = (float*)myGpuMalloc(256 * sizeof(float));
float* x255 = x6+6588352;
CUDA_CALL(cudaMemcpy(x1019, x255, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1022 = (float*)myGpuMalloc(512 * sizeof(float));
float* x256 = x6+20134848;
CUDA_CALL(cudaMemcpy(x1022, x256, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1025 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x257 = x6+602560;
CUDA_CALL(cudaMemcpy(x1025, x257, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1028 = (float*)myGpuMalloc(64 * sizeof(float));
float* x258 = x6+165952;
CUDA_CALL(cudaMemcpy(x1028, x258, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1031 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x259 = x6+469440;
CUDA_CALL(cudaMemcpy(x1031, x259, 131072 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1034 = (float*)myGpuMalloc(256 * sizeof(float));
float* x260 = x6+3227584;
CUDA_CALL(cudaMemcpy(x1034, x260, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1037 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x261 = x6+23549376;
CUDA_CALL(cudaMemcpy(x1037, x261, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1040 = (float*)myGpuMalloc(128 * sizeof(float));
float* x262 = x6+1231680;
CUDA_CALL(cudaMemcpy(x1040, x262, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1043 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x263 = x6+6588864;
CUDA_CALL(cudaMemcpy(x1043, x263, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1046 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x264 = x6+5201344;
CUDA_CALL(cudaMemcpy(x1046, x264, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1049 = (float*)myGpuMalloc(256 * sizeof(float));
float* x265 = x6+77760;
CUDA_CALL(cudaMemcpy(x1049, x265, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1052 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x266 = x6+19084736;
CUDA_CALL(cudaMemcpy(x1052, x266, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1055 = (float*)myGpuMalloc(128 * sizeof(float));
float* x267 = x6+1098048;
CUDA_CALL(cudaMemcpy(x1055, x267, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1058 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x268 = x6+2435520;
CUDA_CALL(cudaMemcpy(x1058, x268, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1061 = (float*)myGpuMalloc(128 * sizeof(float));
float* x269 = x6+1379520;
CUDA_CALL(cudaMemcpy(x1061, x269, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1064 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x270 = x6+2170304;
CUDA_CALL(cudaMemcpy(x1064, x270, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1067 = (float*)myGpuMalloc(256 * sizeof(float));
float* x271 = x6+1578432;
CUDA_CALL(cudaMemcpy(x1067, x271, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1070 = (float*)myGpuMalloc(256 * sizeof(float));
float* x272 = x6+3817920;
CUDA_CALL(cudaMemcpy(x1070, x272, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1073 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x273 = x6+7444928;
CUDA_CALL(cudaMemcpy(x1073, x273, 1024 * sizeof(float), cudaMemcpyHostToDevice));
int32_t x1075 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0);
int32_t x1076 = fsize(x1075);
int64_t x1078 = (int64_t)x1076;
int64_t x1079 = x1078 / 3073LL;
int32_t x1080 = (int32_t)x1079;
int32_t x1081 = x1080 * 3072;
float* x1082 = (float*)myMalloc(x1081 * sizeof(float));;
int* x1083 = (int32_t*)myMalloc(x1080 * sizeof(int32_t));;
char* x1077 = (char*)mmap(0, x1076, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x1075, 0);
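// Each record in the memory-mapped CIFAR-10 batch file is 3073 bytes: one
// label byte followed by 3072 pixel bytes (3 channels x 32 x 32), which the
// loop below converts to floats scaled from [0, 255] to [0, 1].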
for(int x1085=0; x1085 < x1080; x1085++) {
int32_t x1086 = x1085 * 3073;
char x1087 = x1077[x1086];
int32_t x1088 = (int32_t)(unsigned char)x1087;
x1083[x1085] = x1088;
int32_t x1094 = x1086 + 1;
int32_t x1092 = x1085 * 3072;
for(int x1091=0; x1091 < 3072; x1091++) {
int32_t x1095 = x1094 + x1091;
char x1096 = x1077[x1095];
int32_t x1093 = x1092 + x1091;
float x1097 = (float)(unsigned char)x1096;
float x1098 = x1097 / 255.0f;
x1082[x1093] = x1098;
}
}
int32_t x1104 = x1080 / 64;
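// x1104 is the number of complete 64-image minibatches in this batch file;
// each iteration below slices out one minibatch (64 x 3072 input floats plus
// the corresponding 64 labels).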
for(int x1106=0; x1106 < x1104; x1106++) {
int32_t x1107 = x1106 * 64;
int32_t x1108 = x1107 * 3072;
float* x1109 = x1082+x1108;
int* x1110 = x1083+x1107;
printf("input (size 64 x 3 x 32 x 32)\n");
float x1112 = 0.0f;
for(int x1114=0; x1114 < 196608; x1114++) {
float x1115 = x1112;
float x1116 = x1109[x1114];
float x1117 = fabs(x1116);
float x1118 = fabs(x1115);
bool x1119 = x1117 > x1118;
float x1120;
if (x1119) {
x1120 = x1116;
} else {
x1120 = x1115;
}
x1112 = x1120;
}
float x1124 = x1112;
printf("Max Abs: %.5f || ",x1124);
for(int x1127=0; x1127 < 10; x1127++) {
float x1128 = x1109[x1127];
printf("%.5f ",x1128);
}
printf("\n");
// Tensor 'toGPU' invocation.
float* x1134 = (float*)myGpuMalloc(196608 * sizeof(float));
CUDA_CALL(cudaMemcpy(x1134, x1109, 196608 * sizeof(float), cudaMemcpyHostToDevice));
float* x1136 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1137 = (float*)myMalloc(1 * sizeof(float));;
x1137[0] = 0.0f;
float* x1139 = (float*)myMalloc(1 * sizeof(float));;
x1139[0] = 1.0f;
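// First convolution: 3 -> 64 channels, 3x3 kernel, stride 1, pad 1, applied
// to the 64 x 3 x 32 x 32 input batch; x1139 (1.0) and x1137 (0.0) are the
// alpha/beta scaling factors for cudnnConvolutionForward.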
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 3, 32, 32));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 3, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1139, in_desc, x1134, filt_desc, x713,
conv_desc, algo, ws_data, ws_size,
x1137, out_desc, x1136));
};
float* x1142 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1143 = (float*)myMalloc(1 * sizeof(float));;
x1143[0] = 0.0f;
float* x1145 = (float*)myMalloc(1 * sizeof(float));;
x1145[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1145, x1143, in_desc, x1136, out_desc, x1142, sbmv_desc, x875,
x1010, x377, x587, 1.0E-5));
};
float* x1148 = (float*)myMalloc(1 * sizeof(float));;
x1148[0] = 0.0f;
float* x1150 = (float*)myMalloc(1 * sizeof(float));;
x1150[0] = 1.0f;
float* x1152 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1150, x_desc, x1142, x1148, x_desc, x1152));
};
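// 2x2 max pooling, stride 2, no padding: 64x64x32x32 -> 64x64x16x16.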
float* x1154 = (float*)myMalloc(1 * sizeof(float));;
x1154[0] = 0.0f;
float* x1156 = (float*)myMalloc(1 * sizeof(float));;
x1156[0] = 1.0f;
float* x1158 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 32, 32));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN,
        2, 2,
        0, 0,
        2, 2
));
CUDNN_CALL(cudnnPoolingForward(
cudnnHandle,
poolingDesc,
x1156, in_desc, x1152, x1154, out_desc, x1158));
};
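// Bottleneck residual block 1 at 16x16: 1x1 conv (64 -> 64), 3x3 conv (64 -> 64, pad 1), 1x1 conv (64 -> 256), with batch norm after each conv and ReLU after the first two.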
float* x1160 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1161 = (float*)myMalloc(1 * sizeof(float));;
x1161[0] = 0.0f;
float* x1163 = (float*)myMalloc(1 * sizeof(float));;
x1163[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1163, in_desc, x1158, filt_desc, x956,
conv_desc, algo, ws_data, ws_size,
x1161, out_desc, x1160));
};
float* x1166 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1167 = (float*)myMalloc(1 * sizeof(float));;
x1167[0] = 0.0f;
float* x1169 = (float*)myMalloc(1 * sizeof(float));;
x1169[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1169, x1167, in_desc, x1160, out_desc, x1166, sbmv_desc, x335,
x416, x599, x410, 1.0E-5));
};
float* x1172 = (float*)myMalloc(1 * sizeof(float));;
x1172[0] = 0.0f;
float* x1174 = (float*)myMalloc(1 * sizeof(float));;
x1174[0] = 1.0f;
float* x1176 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1174, x_desc, x1166, x1172, x_desc, x1176));
};
float* x1178 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1179 = (float*)myMalloc(1 * sizeof(float));;
x1179[0] = 0.0f;
float* x1181 = (float*)myMalloc(1 * sizeof(float));;
x1181[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1181, in_desc, x1176, filt_desc, x527,
conv_desc, algo, ws_data, ws_size,
x1179, out_desc, x1178));
};
float* x1184 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1185 = (float*)myMalloc(1 * sizeof(float));;
x1185[0] = 0.0f;
float* x1187 = (float*)myMalloc(1 * sizeof(float));;
x1187[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1187, x1185, in_desc, x1178, out_desc, x1184, sbmv_desc, x749,
x404, x572, x731, 1.0E-5));
};
float* x1190 = (float*)myMalloc(1 * sizeof(float));;
x1190[0] = 0.0f;
float* x1192 = (float*)myMalloc(1 * sizeof(float));;
x1192[0] = 1.0f;
float* x1194 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1192, x_desc, x1184, x1190, x_desc, x1194));
};
float* x1196 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1197 = (float*)myMalloc(1 * sizeof(float));;
x1197[0] = 0.0f;
float* x1199 = (float*)myMalloc(1 * sizeof(float));;
x1199[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1199, in_desc, x1194, filt_desc, x353,
conv_desc, algo, ws_data, ws_size,
x1197, out_desc, x1196));
};
float* x1202 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1203 = (float*)myMalloc(1 * sizeof(float));;
x1203[0] = 0.0f;
float* x1205 = (float*)myMalloc(1 * sizeof(float));;
x1205[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1205, x1203, in_desc, x1196, out_desc, x1202, sbmv_desc, x854,
x635, x470, x365, 1.0E-5));
};
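// Projection shortcut for block 1: 1x1 conv (64 -> 256) applied to the block input x1158, followed by batch norm.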
float* x1208 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1209 = (float*)myMalloc(1 * sizeof(float));;
x1209[0] = 0.0f;
float* x1211 = (float*)myMalloc(1 * sizeof(float));;
x1211[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1211, in_desc, x1158, filt_desc, x743,
conv_desc, algo, ws_data, ws_size,
x1209, out_desc, x1208));
};
float* x1214 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1215 = (float*)myMalloc(1 * sizeof(float));;
x1215[0] = 0.0f;
float* x1217 = (float*)myMalloc(1 * sizeof(float));;
x1217[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1217, x1215, in_desc, x1208, out_desc, x1214, sbmv_desc, x485,
x866, x1049, x986, 1.0E-5));
};
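// Residual connection: cudnnAddTensor accumulates the shortcut branch into the main branch (x1202 += x1214); ReLU is applied to the sum.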
float* x1220 = (float*)myMalloc(1 * sizeof(float));;
x1220[0] = 1.0f;
float* x1222 = (float*)myMalloc(1 * sizeof(float));;
x1222[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1220, bias_desc, x1214, x1222, out_desc, x1202));
};
float* x1225 = (float*)myMalloc(1 * sizeof(float));;
x1225[0] = 0.0f;
float* x1227 = (float*)myMalloc(1 * sizeof(float));;
x1227[0] = 1.0f;
float* x1229 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1227, x_desc, x1202, x1225, x_desc, x1229));
};
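// Bottleneck residual block 2: same 1x1 / 3x3 / 1x1 pattern (256 -> 64 -> 64 -> 256); the shortcut is the identity, so the block input x1229 is added back before the final ReLU.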
float* x1231 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1232 = (float*)myMalloc(1 * sizeof(float));;
x1232[0] = 0.0f;
float* x1234 = (float*)myMalloc(1 * sizeof(float));;
x1234[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1234, in_desc, x1229, filt_desc, x770,
conv_desc, algo, ws_data, ws_size,
x1232, out_desc, x1231));
};
float* x1237 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1238 = (float*)myMalloc(1 * sizeof(float));;
x1238[0] = 0.0f;
float* x1240 = (float*)myMalloc(1 * sizeof(float));;
x1240[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1240, x1238, in_desc, x1231, out_desc, x1237, sbmv_desc, x683,
x437, x287, x563, 1.0E-5));
};
float* x1243 = (float*)myMalloc(1 * sizeof(float));;
x1243[0] = 0.0f;
float* x1245 = (float*)myMalloc(1 * sizeof(float));;
x1245[0] = 1.0f;
float* x1247 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1245, x_desc, x1237, x1243, x_desc, x1247));
};
float* x1249 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1250 = (float*)myMalloc(1 * sizeof(float));;
x1250[0] = 0.0f;
float* x1252 = (float*)myMalloc(1 * sizeof(float));;
x1252[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1252, in_desc, x1247, filt_desc, x506,
conv_desc, algo, ws_data, ws_size,
x1250, out_desc, x1249));
};
float* x1255 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1256 = (float*)myMalloc(1 * sizeof(float));;
x1256[0] = 0.0f;
float* x1258 = (float*)myMalloc(1 * sizeof(float));;
x1258[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1258, x1256, in_desc, x1249, out_desc, x1255, sbmv_desc, x881,
x716, x389, x989, 1.0E-5));
};
float* x1261 = (float*)myMalloc(1 * sizeof(float));;
x1261[0] = 0.0f;
float* x1263 = (float*)myMalloc(1 * sizeof(float));;
x1263[0] = 1.0f;
float* x1265 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1263, x_desc, x1255, x1261, x_desc, x1265));
};
float* x1267 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1268 = (float*)myMalloc(1 * sizeof(float));;
x1268[0] = 0.0f;
float* x1270 = (float*)myMalloc(1 * sizeof(float));;
x1270[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1270, in_desc, x1265, filt_desc, x647,
conv_desc, algo, ws_data, ws_size,
x1268, out_desc, x1267));
};
float* x1273 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1274 = (float*)myMalloc(1 * sizeof(float));;
x1274[0] = 0.0f;
float* x1276 = (float*)myMalloc(1 * sizeof(float));;
x1276[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1276, x1274, in_desc, x1267, out_desc, x1273, sbmv_desc, x431,
x278, x530, x755, 1.0E-5));
};
float* x1279 = (float*)myMalloc(1 * sizeof(float));;
x1279[0] = 1.0f;
float* x1281 = (float*)myMalloc(1 * sizeof(float));;
x1281[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1279, bias_desc, x1229, x1281, out_desc, x1273));
};
float* x1284 = (float*)myMalloc(1 * sizeof(float));;
x1284[0] = 0.0f;
float* x1286 = (float*)myMalloc(1 * sizeof(float));;
x1286[0] = 1.0f;
float* x1288 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1286, x_desc, x1273, x1284, x_desc, x1288));
};
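// Bottleneck residual block 3: identical structure, identity shortcut from x1288.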
float* x1290 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1291 = (float*)myMalloc(1 * sizeof(float));;
x1291[0] = 0.0f;
float* x1293 = (float*)myMalloc(1 * sizeof(float));;
x1293[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1293, in_desc, x1288, filt_desc, x707,
conv_desc, algo, ws_data, ws_size,
x1291, out_desc, x1290));
};
float* x1296 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1297 = (float*)myMalloc(1 * sizeof(float));;
x1297[0] = 0.0f;
float* x1299 = (float*)myMalloc(1 * sizeof(float));;
x1299[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1299, x1297, in_desc, x1290, out_desc, x1296, sbmv_desc, x500,
x329, x1028, x818, 1.0E-5));
};
float* x1302 = (float*)myMalloc(1 * sizeof(float));;
x1302[0] = 0.0f;
float* x1304 = (float*)myMalloc(1 * sizeof(float));;
x1304[0] = 1.0f;
float* x1306 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1304, x_desc, x1296, x1302, x_desc, x1306));
};
float* x1308 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1309 = (float*)myMalloc(1 * sizeof(float));;
x1309[0] = 0.0f;
float* x1311 = (float*)myMalloc(1 * sizeof(float));;
x1311[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1311, in_desc, x1306, filt_desc, x476,
conv_desc, algo, ws_data, ws_size,
x1309, out_desc, x1308));
};
float* x1314 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1315 = (float*)myMalloc(1 * sizeof(float));;
x1315[0] = 0.0f;
float* x1317 = (float*)myMalloc(1 * sizeof(float));;
x1317[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1317, x1315, in_desc, x1308, out_desc, x1314, sbmv_desc, x473,
x662, x794, x611, 1.0E-5));
};
float* x1320 = (float*)myMalloc(1 * sizeof(float));;
x1320[0] = 0.0f;
float* x1322 = (float*)myMalloc(1 * sizeof(float));;
x1322[0] = 1.0f;
float* x1324 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1322, x_desc, x1314, x1320, x_desc, x1324));
};
float* x1326 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1327 = (float*)myMalloc(1 * sizeof(float));;
x1327[0] = 0.0f;
float* x1329 = (float*)myMalloc(1 * sizeof(float));;
x1329[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1329, in_desc, x1324, filt_desc, x518,
conv_desc, algo, ws_data, ws_size,
x1327, out_desc, x1326));
};
float* x1332 = (float*)myGpuMalloc(4194304 * sizeof(float));
float* x1333 = (float*)myMalloc(1 * sizeof(float));;
x1333[0] = 0.0f;
float* x1335 = (float*)myMalloc(1 * sizeof(float));;
x1335[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1335, x1333, in_desc, x1326, out_desc, x1332, sbmv_desc, x368,
x998, x809, x656, 1.0E-5));
};
float* x1338 = (float*)myMalloc(1 * sizeof(float));;
x1338[0] = 1.0f;
float* x1340 = (float*)myMalloc(1 * sizeof(float));;
x1340[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1338, bias_desc, x1288, x1340, out_desc, x1332));
};
float* x1343 = (float*)myMalloc(1 * sizeof(float));;
x1343[0] = 0.0f;
float* x1345 = (float*)myMalloc(1 * sizeof(float));;
x1345[0] = 1.0f;
float* x1347 = (float*)myGpuMalloc(4194304 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1345, x_desc, x1332, x1343, x_desc, x1347));
};
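// Stage transition block (256 -> 128 -> 128 -> 512): the 3x3 convolution uses stride 2, halving the spatial size from 16x16 to 8x8.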
float* x1349 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1350 = (float*)myMalloc(1 * sizeof(float));;
x1350[0] = 0.0f;
float* x1352 = (float*)myMalloc(1 * sizeof(float));;
x1352[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1352, in_desc, x1347, filt_desc, x290,
conv_desc, algo, ws_data, ws_size,
x1350, out_desc, x1349));
};
float* x1355 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1356 = (float*)myMalloc(1 * sizeof(float));;
x1356[0] = 0.0f;
float* x1358 = (float*)myMalloc(1 * sizeof(float));;
x1358[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1358, x1356, in_desc, x1349, out_desc, x1355, sbmv_desc, x509,
x773, x869, x659, 1.0E-5));
};
float* x1361 = (float*)myMalloc(1 * sizeof(float));;
x1361[0] = 0.0f;
float* x1363 = (float*)myMalloc(1 * sizeof(float));;
x1363[0] = 1.0f;
float* x1365 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1363, x_desc, x1355, x1361, x_desc, x1365));
};
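// 3x3 convolution, 128 -> 128, stride 2, pad 1: 64x128x16x16 -> 64x128x8x8.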
float* x1367 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1368 = (float*)myMalloc(1 * sizeof(float));;
x1368[0] = 0.0f;
float* x1370 = (float*)myMalloc(1 * sizeof(float));;
x1370[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1370, in_desc, x1365, filt_desc, x338,
conv_desc, algo, ws_data, ws_size,
x1368, out_desc, x1367));
};
float* x1373 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1374 = (float*)myMalloc(1 * sizeof(float));;
x1374[0] = 0.0f;
float* x1376 = (float*)myMalloc(1 * sizeof(float));;
x1376[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1376, x1374, in_desc, x1367, out_desc, x1373, sbmv_desc, x1013,
x827, x641, x386, 1.0E-5));
};
float* x1379 = (float*)myMalloc(1 * sizeof(float));;
x1379[0] = 0.0f;
float* x1381 = (float*)myMalloc(1 * sizeof(float));;
x1381[0] = 1.0f;
float* x1383 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1381, x_desc, x1373, x1379, x_desc, x1383));
};
float* x1385 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1386 = (float*)myMalloc(1 * sizeof(float));;
x1386[0] = 0.0f;
float* x1388 = (float*)myMalloc(1 * sizeof(float));;
x1388[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1388, in_desc, x1383, filt_desc, x575,
conv_desc, algo, ws_data, ws_size,
x1386, out_desc, x1385));
};
float* x1391 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1392 = (float*)myMalloc(1 * sizeof(float));;
x1392[0] = 0.0f;
float* x1394 = (float*)myMalloc(1 * sizeof(float));;
x1394[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1394, x1392, in_desc, x1385, out_desc, x1391, sbmv_desc, x692,
x887, x704, x560, 1.0E-5));
};
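// Stride-2 projection shortcut: 1x1 conv (256 -> 512) on the block input x1347, then batch norm; the result is added into the main branch x1391 before the ReLU.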
float* x1397 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1398 = (float*)myMalloc(1 * sizeof(float));;
x1398[0] = 0.0f;
float* x1400 = (float*)myMalloc(1 * sizeof(float));;
x1400[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 16, 16));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1400, in_desc, x1347, filt_desc, x1031,
conv_desc, algo, ws_data, ws_size,
x1398, out_desc, x1397));
};
float* x1403 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1404 = (float*)myMalloc(1 * sizeof(float));;
x1404[0] = 0.0f;
float* x1406 = (float*)myMalloc(1 * sizeof(float));;
x1406[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1406, x1404, in_desc, x1397, out_desc, x1403, sbmv_desc, x878,
x614, x383, x326, 1.0E-5));
};
float* x1409 = (float*)myMalloc(1 * sizeof(float));;
x1409[0] = 1.0f;
float* x1411 = (float*)myMalloc(1 * sizeof(float));;
x1411[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1409, bias_desc, x1403, x1411, out_desc, x1391));
};
float* x1414 = (float*)myMalloc(1 * sizeof(float));;
x1414[0] = 0.0f;
float* x1416 = (float*)myMalloc(1 * sizeof(float));;
x1416[0] = 1.0f;
float* x1418 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1416, x_desc, x1391, x1414, x_desc, x1418));
};
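// Second block of this stage at 8x8: 1x1 conv (512 -> 128), 3x3 conv (128 -> 128, pad 1), 1x1 conv (128 -> 512), identity shortcut from x1418.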
float* x1420 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1421 = (float*)myMalloc(1 * sizeof(float));;
x1421[0] = 0.0f;
float* x1423 = (float*)myMalloc(1 * sizeof(float));;
x1423[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1423, in_desc, x1418, filt_desc, x1025,
conv_desc, algo, ws_data, ws_size,
x1421, out_desc, x1420));
};
float* x1426 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1427 = (float*)myMalloc(1 * sizeof(float));;
x1427[0] = 0.0f;
float* x1429 = (float*)myMalloc(1 * sizeof(float));;
x1429[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1429, x1427, in_desc, x1420, out_desc, x1426, sbmv_desc, x923,
x308, x557, x788, 1.0E-5));
};
float* x1432 = (float*)myMalloc(1 * sizeof(float));;
x1432[0] = 0.0f;
float* x1434 = (float*)myMalloc(1 * sizeof(float));;
x1434[0] = 1.0f;
float* x1436 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1434, x_desc, x1426, x1432, x_desc, x1436));
};
float* x1438 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1439 = (float*)myMalloc(1 * sizeof(float));;
x1439[0] = 0.0f;
float* x1441 = (float*)myMalloc(1 * sizeof(float));;
x1441[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1441, in_desc, x1436, filt_desc, x962,
conv_desc, algo, ws_data, ws_size,
x1439, out_desc, x1438));
};
float* x1444 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1445 = (float*)myMalloc(1 * sizeof(float));;
x1445[0] = 0.0f;
float* x1447 = (float*)myMalloc(1 * sizeof(float));;
x1447[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
      x1447, x1445, in_desc, x1438, out_desc, x1444, sbmv_desc, x281,
x542, x362, x932, 1.0E-5));
};
float* x1450 = (float*)myMalloc(1 * sizeof(float));;
x1450[0] = 0.0f;
float* x1452 = (float*)myMalloc(1 * sizeof(float));;
x1452[0] = 1.0f;
float* x1454 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1452, x_desc, x1444, x1450, x_desc, x1454));
};
float* x1456 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1457 = (float*)myMalloc(1 * sizeof(float));;
x1457[0] = 0.0f;
float* x1459 = (float*)myMalloc(1 * sizeof(float));;
x1459[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1459, in_desc, x1454, filt_desc, x590,
conv_desc, algo, ws_data, ws_size,
x1457, out_desc, x1456));
};
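// The 1x1 convolution above expands 128 -> 512 channels at 8x8, so x1456 needs
// 64 * 512 * 8 * 8 = 2,097,152 floats -- matching the myGpuMalloc size a few lines up.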
float* x1462 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1463 = (float*)myMalloc(1 * sizeof(float));;
x1463[0] = 0.0f;
float* x1465 = (float*)myMalloc(1 * sizeof(float));;
x1465[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1465, x1465, in_desc, x1456, out_desc, x1462, sbmv_desc, x413,
x995, x698, x521, 1.0E-5));
};
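// cudnnAddTensor computes C = alpha * A + beta * C. The call below uses A = x1418
// (allocated before this section; presumably this unit's shortcut tensor) and
// C = x1462, so with both scalars at 1 it adds the skip connection into the freshly
// normalized main branch.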
float* x1468 = (float*)myMalloc(1 * sizeof(float));;
x1468[0] = 1.0f;
float* x1470 = (float*)myMalloc(1 * sizeof(float));;
x1470[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1468, bias_desc, x1418, x1470, out_desc, x1462));
};
float* x1473 = (float*)myMalloc(1 * sizeof(float));;
x1473[0] = 0.0f;
float* x1475 = (float*)myMalloc(1 * sizeof(float));;
x1475[0] = 1.0f;
float* x1477 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1475, x_desc, x1462, x1473, x_desc, x1477));
};
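// x1477 is this unit's output. What follows repeats the same recipe -- 1x1 down to
// 128, 3x3 at 128, 1x1 back up to 512, batch norm after each conv -- and the
// AddTensor further down reuses x1477 as the shortcut, i.e. an identity bottleneck
// if the ResNet reading above is right.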
float* x1479 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1480 = (float*)myMalloc(1 * sizeof(float));;
x1480[0] = 0.0f;
float* x1482 = (float*)myMalloc(1 * sizeof(float));;
x1482[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1482, in_desc, x1477, filt_desc, x845,
conv_desc, algo, ws_data, ws_size,
x1480, out_desc, x1479));
};
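// Note on resources: every conv block here asks cuDNN for an algorithm with
// CUDNN_CONVOLUTION_FWD_PREFER_FASTEST (the 0 memory-limit argument only matters for
// the SPECIFY_WORKSPACE_LIMIT preference, so it is effectively ignored), then
// allocates ws_data and never frees it, and none of the descriptors are destroyed.
// That only stays bounded if myGpuMalloc is an arena-style allocator reset outside
// this section -- an assumption, since the allocator is defined elsewhere.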
float* x1485 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1486 = (float*)myMalloc(1 * sizeof(float));;
x1486[0] = 0.0f;
float* x1488 = (float*)myMalloc(1 * sizeof(float));;
x1488[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1488, x1488, in_desc, x1479, out_desc, x1485, sbmv_desc, x392,
x767, x593, x284, 1.0E-5));
};
float* x1491 = (float*)myMalloc(1 * sizeof(float));;
x1491[0] = 0.0f;
float* x1493 = (float*)myMalloc(1 * sizeof(float));;
x1493[0] = 1.0f;
float* x1495 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1493, x_desc, x1485, x1491, x_desc, x1495));
};
float* x1497 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1498 = (float*)myMalloc(1 * sizeof(float));;
x1498[0] = 0.0f;
float* x1500 = (float*)myMalloc(1 * sizeof(float));;
x1500[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1500, in_desc, x1495, filt_desc, x830,
conv_desc, algo, ws_data, ws_size,
x1498, out_desc, x1497));
};
float* x1503 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1504 = (float*)myMalloc(1 * sizeof(float));;
x1504[0] = 0.0f;
float* x1506 = (float*)myMalloc(1 * sizeof(float));;
x1506[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1506, x1506, in_desc, x1497, out_desc, x1503, sbmv_desc, x638,
x440, x908, x1055, 1.0E-5));
};
float* x1509 = (float*)myMalloc(1 * sizeof(float));;
x1509[0] = 0.0f;
float* x1511 = (float*)myMalloc(1 * sizeof(float));;
x1511[0] = 1.0f;
float* x1513 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1511, x_desc, x1503, x1509, x_desc, x1513));
};
float* x1515 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1516 = (float*)myMalloc(1 * sizeof(float));;
x1516[0] = 0.0f;
float* x1518 = (float*)myMalloc(1 * sizeof(float));;
x1518[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1518, in_desc, x1513, filt_desc, x380,
conv_desc, algo, ws_data, ws_size,
x1516, out_desc, x1515));
};
float* x1521 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1522 = (float*)myMalloc(1 * sizeof(float));;
x1522[0] = 0.0f;
float* x1524 = (float*)myMalloc(1 * sizeof(float));;
x1524[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1524, x1524, in_desc, x1515, out_desc, x1521, sbmv_desc, x758,
x503, x332, x926, 1.0E-5));
};
float* x1527 = (float*)myMalloc(1 * sizeof(float));;
x1527[0] = 1.0f;
float* x1529 = (float*)myMalloc(1 * sizeof(float));;
x1529[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1527, bias_desc, x1477, x1529, out_desc, x1521));
};
float* x1532 = (float*)myMalloc(1 * sizeof(float));;
x1532[0] = 0.0f;
float* x1534 = (float*)myMalloc(1 * sizeof(float));;
x1534[0] = 1.0f;
float* x1536 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1534, x_desc, x1521, x1532, x_desc, x1536));
};
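// x1536 is the previous unit's output and becomes the shortcut of the identity
// bottleneck that starts here (it is consumed by the AddTensor just before the next
// 512-channel ReLU).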
float* x1538 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1539 = (float*)myMalloc(1 * sizeof(float));;
x1539[0] = 0.0f;
float* x1541 = (float*)myMalloc(1 * sizeof(float));;
x1541[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1541, in_desc, x1536, filt_desc, x653,
conv_desc, algo, ws_data, ws_size,
x1539, out_desc, x1538));
};
float* x1544 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1545 = (float*)myMalloc(1 * sizeof(float));;
x1545[0] = 0.0f;
float* x1547 = (float*)myMalloc(1 * sizeof(float));;
x1547[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1547, x1547, in_desc, x1538, out_desc, x1544, sbmv_desc, x374,
x983, x965, x1040, 1.0E-5));
};
float* x1550 = (float*)myMalloc(1 * sizeof(float));;
x1550[0] = 0.0f;
float* x1552 = (float*)myMalloc(1 * sizeof(float));;
x1552[0] = 1.0f;
float* x1554 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1552, x_desc, x1544, x1550, x_desc, x1554));
};
float* x1556 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1557 = (float*)myMalloc(1 * sizeof(float));;
x1557[0] = 0.0f;
float* x1559 = (float*)myMalloc(1 * sizeof(float));;
x1559[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1559, in_desc, x1554, filt_desc, x752,
conv_desc, algo, ws_data, ws_size,
x1557, out_desc, x1556));
};
float* x1562 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1563 = (float*)myMalloc(1 * sizeof(float));;
x1563[0] = 0.0f;
float* x1565 = (float*)myMalloc(1 * sizeof(float));;
x1565[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1565, x1565, in_desc, x1556, out_desc, x1562, sbmv_desc, x494,
x371, x1061, x701, 1.0E-5));
};
float* x1568 = (float*)myMalloc(1 * sizeof(float));;
x1568[0] = 0.0f;
float* x1570 = (float*)myMalloc(1 * sizeof(float));;
x1570[0] = 1.0f;
float* x1572 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1570, x_desc, x1562, x1568, x_desc, x1572));
};
float* x1574 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1575 = (float*)myMalloc(1 * sizeof(float));;
x1575[0] = 0.0f;
float* x1577 = (float*)myMalloc(1 * sizeof(float));;
x1577[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1577, in_desc, x1572, filt_desc, x422,
conv_desc, algo, ws_data, ws_size,
x1575, out_desc, x1574));
};
float* x1580 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1581 = (float*)myMalloc(1 * sizeof(float));;
x1581[0] = 0.0f;
float* x1583 = (float*)myMalloc(1 * sizeof(float));;
x1583[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1583, x1583, in_desc, x1574, out_desc, x1580, sbmv_desc, x725,
x419, x314, x959, 1.0E-5));
};
float* x1586 = (float*)myMalloc(1 * sizeof(float));;
x1586[0] = 1.0f;
float* x1588 = (float*)myMalloc(1 * sizeof(float));;
x1588[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1586, bias_desc, x1536, x1588, out_desc, x1580));
};
float* x1591 = (float*)myMalloc(1 * sizeof(float));;
x1591[0] = 0.0f;
float* x1593 = (float*)myMalloc(1 * sizeof(float));;
x1593[0] = 1.0f;
float* x1595 = (float*)myGpuMalloc(2097152 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1593, x_desc, x1580, x1591, x_desc, x1595));
};
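// x1595 closes the 8x8 / 512-channel stretch. The next unit changes both width and
// resolution: 1x1 down to 256, a 3x3 with stride 2 (8x8 -> 4x4), 1x1 up to 1024,
// plus a separate strided 1x1 projection applied directly to x1595 so the shortcut
// matches the new shape -- the usual downsampling bottleneck layout, again assuming
// the ResNet interpretation.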
float* x1597 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1598 = (float*)myMalloc(1 * sizeof(float));;
x1598[0] = 0.0f;
float* x1600 = (float*)myMalloc(1 * sizeof(float));;
x1600[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1600, in_desc, x1595, filt_desc, x797,
conv_desc, algo, ws_data, ws_size,
x1598, out_desc, x1597));
};
float* x1603 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1604 = (float*)myMalloc(1 * sizeof(float));;
x1604[0] = 0.0f;
float* x1606 = (float*)myMalloc(1 * sizeof(float));;
x1606[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1606, x1606, in_desc, x1597, out_desc, x1603, sbmv_desc, x1067,
x320, x650, x851, 1.0E-5));
};
float* x1609 = (float*)myMalloc(1 * sizeof(float));;
x1609[0] = 0.0f;
float* x1611 = (float*)myMalloc(1 * sizeof(float));;
x1611[0] = 1.0f;
float* x1613 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1611, x_desc, x1603, x1609, x_desc, x1613));
};
float* x1615 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1616 = (float*)myMalloc(1 * sizeof(float));;
x1616[0] = 0.0f;
float* x1618 = (float*)myMalloc(1 * sizeof(float));;
x1618[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1618, in_desc, x1613, filt_desc, x782,
conv_desc, algo, ws_data, ws_size,
x1616, out_desc, x1615));
};
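// With pad 1, kernel 3 and stride 2 the output side is floor((8 + 2*1 - 3)/2) + 1 = 4,
// hence the 64x256x4x4 out_desc and the 262,144-float result buffer
// (64 * 256 * 4 * 4).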
float* x1621 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1622 = (float*)myMalloc(1 * sizeof(float));;
x1622[0] = 0.0f;
float* x1624 = (float*)myMalloc(1 * sizeof(float));;
x1624[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1624, x1624, in_desc, x1615, out_desc, x1621, sbmv_desc, x581,
x305, x944, x554, 1.0E-5));
};
float* x1627 = (float*)myMalloc(1 * sizeof(float));;
x1627[0] = 0.0f;
float* x1629 = (float*)myMalloc(1 * sizeof(float));;
x1629[0] = 1.0f;
float* x1631 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1629, x_desc, x1621, x1627, x_desc, x1631));
};
float* x1633 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1634 = (float*)myMalloc(1 * sizeof(float));;
x1634[0] = 0.0f;
float* x1636 = (float*)myMalloc(1 * sizeof(float));;
x1636[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1636, in_desc, x1631, filt_desc, x1064,
conv_desc, algo, ws_data, ws_size,
x1634, out_desc, x1633));
};
float* x1639 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1640 = (float*)myMalloc(1 * sizeof(float));;
x1640[0] = 0.0f;
float* x1642 = (float*)myMalloc(1 * sizeof(float));;
x1642[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1642, x1642, in_desc, x1633, out_desc, x1639, sbmv_desc, x311,
x608, x905, x1058, 1.0E-5));
};
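// The convolution below does not read the main branch: its input is x1595, the stage
// input saved above, pushed through a stride-2, 1x1 filter bank (512 -> 1024) so the
// shortcut comes out at the main branch's 64x1024x4x4 shape.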
float* x1645 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1646 = (float*)myMalloc(1 * sizeof(float));;
x1646[0] = 0.0f;
float* x1648 = (float*)myMalloc(1 * sizeof(float));;
x1648[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 8, 8));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1648, in_desc, x1595, filt_desc, x482,
conv_desc, algo, ws_data, ws_size,
x1646, out_desc, x1645));
};
float* x1651 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1652 = (float*)myMalloc(1 * sizeof(float));;
x1652[0] = 0.0f;
float* x1654 = (float*)myMalloc(1 * sizeof(float));;
x1654[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1654, x1654, in_desc, x1645, out_desc, x1651, sbmv_desc, x344,
x917, x515, x890, 1.0E-5));
};
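// Residual join of the downsampling unit: as in the identity blocks, the shortcut
// (here the projected x1651 rather than a saved activation) is the A operand of
// cudnnAddTensor and the main branch x1639 is the accumulating C; both scalars are 1.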
float* x1657 = (float*)myMalloc(1 * sizeof(float));;
x1657[0] = 1.0f;
float* x1659 = (float*)myMalloc(1 * sizeof(float));;
x1659[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1657, bias_desc, x1651, x1659, out_desc, x1639));
};
float* x1662 = (float*)myMalloc(1 * sizeof(float));;
x1662[0] = 0.0f;
float* x1664 = (float*)myMalloc(1 * sizeof(float));;
x1664[0] = 1.0f;
float* x1666 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1664, x_desc, x1639, x1662, x_desc, x1666));
};
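// x1666 is the first activation of the 4x4 / 1024-channel stage. The remaining units
// in this section are identity bottlenecks (1024 -> 256 -> 256 -> 1024) whose
// shortcuts are the successive stage activations (x1666, then x1725, ...).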
float* x1668 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1669 = (float*)myMalloc(1 * sizeof(float));;
x1669[0] = 0.0f;
float* x1671 = (float*)myMalloc(1 * sizeof(float));;
x1671[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1671, in_desc, x1666, filt_desc, x296,
conv_desc, algo, ws_data, ws_size,
x1669, out_desc, x1668));
};
float* x1674 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1675 = (float*)myMalloc(1 * sizeof(float));;
x1675[0] = 0.0f;
float* x1677 = (float*)myMalloc(1 * sizeof(float));;
x1677[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1677, x1677, in_desc, x1668, out_desc, x1674, sbmv_desc, x347,
x914, x1034, x728, 1.0E-5));
};
float* x1680 = (float*)myMalloc(1 * sizeof(float));;
x1680[0] = 0.0f;
float* x1682 = (float*)myMalloc(1 * sizeof(float));;
x1682[0] = 1.0f;
float* x1684 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1682, x_desc, x1674, x1680, x_desc, x1684));
};
float* x1686 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1687 = (float*)myMalloc(1 * sizeof(float));;
x1687[0] = 0.0f;
float* x1689 = (float*)myMalloc(1 * sizeof(float));;
x1689[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1689, in_desc, x1684, filt_desc, x350,
conv_desc, algo, ws_data, ws_size,
x1687, out_desc, x1686));
};
float* x1692 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1693 = (float*)myMalloc(1 * sizeof(float));;
x1693[0] = 0.0f;
float* x1695 = (float*)myMalloc(1 * sizeof(float));;
x1695[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1695, x1695, in_desc, x1686, out_desc, x1692, sbmv_desc, x1070,
x545, x857, x968, 1.0E-5));
};
float* x1698 = (float*)myMalloc(1 * sizeof(float));;
x1698[0] = 0.0f;
float* x1700 = (float*)myMalloc(1 * sizeof(float));;
x1700[0] = 1.0f;
float* x1702 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1700, x_desc, x1692, x1698, x_desc, x1702));
};
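// x1702 is the 256-channel activation that the 1x1 expansion below (filter x425,
// 1024x256x1x1) maps back to 1024 channels, completing this unit's main branch
// before the skip addition.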
float* x1704 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1705 = (float*)myMalloc(1 * sizeof(float));;
x1705[0] = 0.0f;
float* x1707 = (float*)myMalloc(1 * sizeof(float));;
x1707[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1707, in_desc, x1702, filt_desc, x425,
conv_desc, algo, ws_data, ws_size,
x1705, out_desc, x1704));
};
float* x1710 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1711 = (float*)myMalloc(1 * sizeof(float));;
x1711[0] = 0.0f;
float* x1713 = (float*)myMalloc(1 * sizeof(float));;
x1713[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1713, x1713, in_desc, x1704, out_desc, x1710, sbmv_desc, x317,
x953, x803, x686, 1.0E-5));
};
float* x1716 = (float*)myMalloc(1 * sizeof(float));;
x1716[0] = 1.0f;
float* x1718 = (float*)myMalloc(1 * sizeof(float));;
x1718[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1716, bias_desc, x1666, x1718, out_desc, x1710));
};
float* x1721 = (float*)myMalloc(1 * sizeof(float));;
x1721[0] = 0.0f;
float* x1723 = (float*)myMalloc(1 * sizeof(float));;
x1723[0] = 1.0f;
float* x1725 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1723, x_desc, x1710, x1721, x_desc, x1725));
};
float* x1727 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1728 = (float*)myMalloc(1 * sizeof(float));;
x1728[0] = 0.0f;
float* x1730 = (float*)myMalloc(1 * sizeof(float));;
x1730[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1730, in_desc, x1725, filt_desc, x911,
conv_desc, algo, ws_data, ws_size,
x1728, out_desc, x1727));
};
float* x1733 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1734 = (float*)myMalloc(1 * sizeof(float));;
x1734[0] = 0.0f;
float* x1736 = (float*)myMalloc(1 * sizeof(float));;
x1736[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1736, x1736, in_desc, x1727, out_desc, x1733, sbmv_desc, x644,
x848, x791, x779, 1.0E-5));
};
float* x1739 = (float*)myMalloc(1 * sizeof(float));;
x1739[0] = 0.0f;
float* x1741 = (float*)myMalloc(1 * sizeof(float));;
x1741[0] = 1.0f;
float* x1743 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1741, x_desc, x1733, x1739, x_desc, x1743));
};
float* x1745 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1746 = (float*)myMalloc(1 * sizeof(float));;
x1746[0] = 0.0f;
float* x1748 = (float*)myMalloc(1 * sizeof(float));;
x1748[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1748, in_desc, x1743, filt_desc, x299,
conv_desc, algo, ws_data, ws_size,
x1746, out_desc, x1745));
};
float* x1751 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1752 = (float*)myMalloc(1 * sizeof(float));;
x1752[0] = 0.0f;
float* x1754 = (float*)myMalloc(1 * sizeof(float));;
x1754[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1754, x1754, in_desc, x1745, out_desc, x1751, sbmv_desc, x941,
x833, x629, x446, 1.0E-5));
};
float* x1757 = (float*)myMalloc(1 * sizeof(float));;
x1757[0] = 0.0f;
float* x1759 = (float*)myMalloc(1 * sizeof(float));;
x1759[0] = 1.0f;
float* x1761 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1759, x_desc, x1751, x1757, x_desc, x1761));
};
float* x1763 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1764 = (float*)myMalloc(1 * sizeof(float));;
x1764[0] = 0.0f;
float* x1766 = (float*)myMalloc(1 * sizeof(float));;
x1766[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1766, in_desc, x1761, filt_desc, x605,
conv_desc, algo, ws_data, ws_size,
x1764, out_desc, x1763));
};
float* x1769 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1770 = (float*)myMalloc(1 * sizeof(float));;
x1770[0] = 0.0f;
float* x1772 = (float*)myMalloc(1 * sizeof(float));;
x1772[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1772, x1772, in_desc, x1763, out_desc, x1769, sbmv_desc, x1046,
x428, x677, x821, 1.0E-5));
};
float* x1775 = (float*)myMalloc(1 * sizeof(float));;
x1775[0] = 1.0f;
float* x1777 = (float*)myMalloc(1 * sizeof(float));;
x1777[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1775, bias_desc, x1725, x1777, out_desc, x1769));
};
float* x1780 = (float*)myMalloc(1 * sizeof(float));;
x1780[0] = 0.0f;
float* x1782 = (float*)myMalloc(1 * sizeof(float));;
x1782[0] = 1.0f;
float* x1784 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1782, x_desc, x1769, x1780, x_desc, x1784));
};
float* x1786 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1787 = (float*)myMalloc(1 * sizeof(float));;
x1787[0] = 0.0f;
float* x1789 = (float*)myMalloc(1 * sizeof(float));;
x1789[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1789, in_desc, x1784, filt_desc, x275,
conv_desc, algo, ws_data, ws_size,
x1787, out_desc, x1786));
};
float* x1792 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1793 = (float*)myMalloc(1 * sizeof(float));;
x1793[0] = 0.0f;
float* x1795 = (float*)myMalloc(1 * sizeof(float));;
x1795[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1795, x1795, in_desc, x1786, out_desc, x1792, sbmv_desc, x533,
x980, x746, x551, 1.0E-5));
};
float* x1798 = (float*)myMalloc(1 * sizeof(float));;
x1798[0] = 0.0f;
float* x1800 = (float*)myMalloc(1 * sizeof(float));;
x1800[0] = 1.0f;
float* x1802 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1800, x_desc, x1792, x1798, x_desc, x1802));
};
float* x1804 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1805 = (float*)myMalloc(1 * sizeof(float));;
x1805[0] = 0.0f;
float* x1807 = (float*)myMalloc(1 * sizeof(float));;
x1807[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1807, in_desc, x1802, filt_desc, x1004,
conv_desc, algo, ws_data, ws_size,
x1805, out_desc, x1804));
};
float* x1810 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1811 = (float*)myMalloc(1 * sizeof(float));;
x1811[0] = 0.0f;
float* x1813 = (float*)myMalloc(1 * sizeof(float));;
x1813[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1813, x1813, in_desc, x1804, out_desc, x1810, sbmv_desc, x479,
x665, x815, x947, 1.0E-5));
};
float* x1816 = (float*)myMalloc(1 * sizeof(float));;
x1816[0] = 0.0f;
float* x1818 = (float*)myMalloc(1 * sizeof(float));;
x1818[0] = 1.0f;
float* x1820 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1818, x_desc, x1810, x1816, x_desc, x1820));
};
float* x1822 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1823 = (float*)myMalloc(1 * sizeof(float));;
x1823[0] = 0.0f;
float* x1825 = (float*)myMalloc(1 * sizeof(float));;
x1825[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1825, in_desc, x1820, filt_desc, x524,
conv_desc, algo, ws_data, ws_size,
x1823, out_desc, x1822));
};
float* x1828 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1829 = (float*)myMalloc(1 * sizeof(float));;
x1829[0] = 0.0f;
float* x1831 = (float*)myMalloc(1 * sizeof(float));;
x1831[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1831, x1831, in_desc, x1822, out_desc, x1828, sbmv_desc, x971,
x695, x950, x740, 1.0E-5));
};
float* x1834 = (float*)myMalloc(1 * sizeof(float));;
x1834[0] = 1.0f;
float* x1836 = (float*)myMalloc(1 * sizeof(float));;
x1836[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1834, bias_desc, x1784, x1836, out_desc, x1828));
};
float* x1839 = (float*)myMalloc(1 * sizeof(float));;
x1839[0] = 0.0f;
float* x1841 = (float*)myMalloc(1 * sizeof(float));;
x1841[0] = 1.0f;
float* x1843 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1841, x_desc, x1828, x1839, x_desc, x1843));
};
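// Next bottleneck block (same 1024->256->256->1024 shape pattern); its input is
// the ReLU output x1843, which is also reused in the residual cudnnAddTensor below.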
float* x1845 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1846 = (float*)myMalloc(1 * sizeof(float));;
x1846[0] = 0.0f;
float* x1848 = (float*)myMalloc(1 * sizeof(float));;
x1848[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1848, in_desc, x1843, filt_desc, x323,
conv_desc, algo, ws_data, ws_size,
x1846, out_desc, x1845));
};
float* x1851 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1852 = (float*)myMalloc(1 * sizeof(float));;
x1852[0] = 0.0f;
float* x1854 = (float*)myMalloc(1 * sizeof(float));;
x1854[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1854, x1854, in_desc, x1845, out_desc, x1851, sbmv_desc, x488,
x812, x1019, x464, 1.0E-5));
};
float* x1857 = (float*)myMalloc(1 * sizeof(float));;
x1857[0] = 0.0f;
float* x1859 = (float*)myMalloc(1 * sizeof(float));;
x1859[0] = 1.0f;
float* x1861 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1859, x_desc, x1851, x1857, x_desc, x1861));
};
float* x1863 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1864 = (float*)myMalloc(1 * sizeof(float));;
x1864[0] = 0.0f;
float* x1866 = (float*)myMalloc(1 * sizeof(float));;
x1866[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1866, in_desc, x1861, filt_desc, x1043,
conv_desc, algo, ws_data, ws_size,
x1864, out_desc, x1863));
};
float* x1869 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1870 = (float*)myMalloc(1 * sizeof(float));;
x1870[0] = 0.0f;
float* x1872 = (float*)myMalloc(1 * sizeof(float));;
x1872[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1872, x1872, in_desc, x1863, out_desc, x1869, sbmv_desc, x761,
x584, x1007, x569, 1.0E-5));
};
float* x1875 = (float*)myMalloc(1 * sizeof(float));;
x1875[0] = 0.0f;
float* x1877 = (float*)myMalloc(1 * sizeof(float));;
x1877[0] = 1.0f;
float* x1879 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1877, x_desc, x1869, x1875, x_desc, x1879));
};
float* x1881 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1882 = (float*)myMalloc(1 * sizeof(float));;
x1882[0] = 0.0f;
float* x1884 = (float*)myMalloc(1 * sizeof(float));;
x1884[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1884, in_desc, x1879, filt_desc, x920,
conv_desc, algo, ws_data, ws_size,
x1882, out_desc, x1881));
};
float* x1887 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1888 = (float*)myMalloc(1 * sizeof(float));;
x1888[0] = 0.0f;
float* x1890 = (float*)myMalloc(1 * sizeof(float));;
x1890[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1890, x1890, in_desc, x1881, out_desc, x1887, sbmv_desc, x434,
x617, x884, x1073, 1.0E-5));
};
float* x1893 = (float*)myMalloc(1 * sizeof(float));;
x1893[0] = 1.0f;
float* x1895 = (float*)myMalloc(1 * sizeof(float));;
x1895[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1893, bias_desc, x1843, x1895, out_desc, x1887));
};
float* x1898 = (float*)myMalloc(1 * sizeof(float));;
x1898[0] = 0.0f;
float* x1900 = (float*)myMalloc(1 * sizeof(float));;
x1900[0] = 1.0f;
float* x1902 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1900, x_desc, x1887, x1898, x_desc, x1902));
};
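// Next bottleneck block; input x1902 feeds the 1x1 reduce conv and is added
// back at the end of the block (see the cudnnAddTensor call that updates x1946).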
float* x1904 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1905 = (float*)myMalloc(1 * sizeof(float));;
x1905[0] = 0.0f;
float* x1907 = (float*)myMalloc(1 * sizeof(float));;
x1907[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1907, in_desc, x1902, filt_desc, x710,
conv_desc, algo, ws_data, ws_size,
x1905, out_desc, x1904));
};
float* x1910 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1911 = (float*)myMalloc(1 * sizeof(float));;
x1911[0] = 0.0f;
float* x1913 = (float*)myMalloc(1 * sizeof(float));;
x1913[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1913, x1913, in_desc, x1904, out_desc, x1910, sbmv_desc, x512,
x1016, x497, x785, 1.0E-5));
};
float* x1916 = (float*)myMalloc(1 * sizeof(float));;
x1916[0] = 0.0f;
float* x1918 = (float*)myMalloc(1 * sizeof(float));;
x1918[0] = 1.0f;
float* x1920 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1918, x_desc, x1910, x1916, x_desc, x1920));
};
float* x1922 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1923 = (float*)myMalloc(1 * sizeof(float));;
x1923[0] = 0.0f;
float* x1925 = (float*)myMalloc(1 * sizeof(float));;
x1925[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1925, in_desc, x1920, filt_desc, x935,
conv_desc, algo, ws_data, ws_size,
x1923, out_desc, x1922));
};
float* x1928 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1929 = (float*)myMalloc(1 * sizeof(float));;
x1929[0] = 0.0f;
float* x1931 = (float*)myMalloc(1 * sizeof(float));;
x1931[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1931, x1931, in_desc, x1922, out_desc, x1928, sbmv_desc, x680,
x824, x467, x977, 1.0E-5));
};
float* x1934 = (float*)myMalloc(1 * sizeof(float));;
x1934[0] = 0.0f;
float* x1936 = (float*)myMalloc(1 * sizeof(float));;
x1936[0] = 1.0f;
float* x1938 = (float*)myGpuMalloc(262144 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1936, x_desc, x1928, x1934, x_desc, x1938));
};
float* x1940 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1941 = (float*)myMalloc(1 * sizeof(float));;
x1941[0] = 0.0f;
float* x1943 = (float*)myMalloc(1 * sizeof(float));;
x1943[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1943, in_desc, x1938, filt_desc, x548,
conv_desc, algo, ws_data, ws_size,
x1941, out_desc, x1940));
};
float* x1946 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1947 = (float*)myMalloc(1 * sizeof(float));;
x1947[0] = 0.0f;
float* x1949 = (float*)myMalloc(1 * sizeof(float));;
x1949[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1949, x1949, in_desc, x1940, out_desc, x1946, sbmv_desc, x1001,
x536, x623, x806, 1.0E-5));
};
float* x1952 = (float*)myMalloc(1 * sizeof(float));;
x1952[0] = 1.0f;
float* x1954 = (float*)myMalloc(1 * sizeof(float));;
x1954[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1952, bias_desc, x1902, x1954, out_desc, x1946));
};
float* x1957 = (float*)myMalloc(1 * sizeof(float));;
x1957[0] = 0.0f;
float* x1959 = (float*)myMalloc(1 * sizeof(float));;
x1959[0] = 1.0f;
float* x1961 = (float*)myGpuMalloc(1048576 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1959, x_desc, x1946, x1957, x_desc, x1961));
};
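// Transition block (channels 1024 -> 2048, spatial 4x4 -> 2x2), inferred from the shapes:
// main path is 1x1 conv 1024->512, BN, ReLU; 3x3 conv 512->512 with stride 2, BN, ReLU;
// 1x1 conv 512->2048, BN. A strided projection shortcut follows further below so the
// residual add shapes match.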
float* x1963 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1964 = (float*)myMalloc(1 * sizeof(float));;
x1964[0] = 0.0f;
float* x1966 = (float*)myMalloc(1 * sizeof(float));;
x1966[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1966, in_desc, x1961, filt_desc, x674,
conv_desc, algo, ws_data, ws_size,
x1964, out_desc, x1963));
};
float* x1969 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1970 = (float*)myMalloc(1 * sizeof(float));;
x1970[0] = 0.0f;
float* x1972 = (float*)myMalloc(1 * sizeof(float));;
x1972[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1972, x1972, in_desc, x1963, out_desc, x1969, sbmv_desc, x860,
x929, x458, x620, 1.0E-5));
};
float* x1975 = (float*)myMalloc(1 * sizeof(float));;
x1975[0] = 0.0f;
float* x1977 = (float*)myMalloc(1 * sizeof(float));;
x1977[0] = 1.0f;
float* x1979 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1977, x_desc, x1969, x1975, x_desc, x1979));
};
float* x1981 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x1982 = (float*)myMalloc(1 * sizeof(float));;
x1982[0] = 0.0f;
float* x1984 = (float*)myMalloc(1 * sizeof(float));;
x1984[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1984, in_desc, x1979, filt_desc, x359,
conv_desc, algo, ws_data, ws_size,
x1982, out_desc, x1981));
};
float* x1987 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x1988 = (float*)myMalloc(1 * sizeof(float));;
x1988[0] = 0.0f;
float* x1990 = (float*)myMalloc(1 * sizeof(float));;
x1990[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1990, x1990, in_desc, x1981, out_desc, x1987, sbmv_desc, x872,
x734, x596, x407, 1.0E-5));
};
float* x1993 = (float*)myMalloc(1 * sizeof(float));;
x1993[0] = 0.0f;
float* x1995 = (float*)myMalloc(1 * sizeof(float));;
x1995[0] = 1.0f;
float* x1997 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1995, x_desc, x1987, x1993, x_desc, x1997));
};
float* x1999 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2000 = (float*)myMalloc(1 * sizeof(float));;
x2000[0] = 0.0f;
float* x2002 = (float*)myMalloc(1 * sizeof(float));;
x2002[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2002, in_desc, x1997, filt_desc, x893,
conv_desc, algo, ws_data, ws_size,
x2000, out_desc, x1999));
};
float* x2005 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2006 = (float*)myMalloc(1 * sizeof(float));;
x2006[0] = 0.0f;
float* x2008 = (float*)myMalloc(1 * sizeof(float));;
x2008[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2008, x2008, in_desc, x1999, out_desc, x2005, sbmv_desc, x974,
x443, x602, x836, 1.0E-5));
};
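// Projection shortcut for the strided block: 1x1 conv 1024->2048 with stride 2
// applied to the block input x1961, followed by its own BN, so the shortcut
// matches the 64x2048x2x2 main-path output.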
float* x2011 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2012 = (float*)myMalloc(1 * sizeof(float));;
x2012[0] = 0.0f;
float* x2014 = (float*)myMalloc(1 * sizeof(float));;
x2014[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, 4, 4));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2014, in_desc, x1961, filt_desc, x899,
conv_desc, algo, ws_data, ws_size,
x2012, out_desc, x2011));
};
float* x2017 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2018 = (float*)myMalloc(1 * sizeof(float));;
x2018[0] = 0.0f;
float* x2020 = (float*)myMalloc(1 * sizeof(float));;
x2020[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2020, x2020, in_desc, x2011, out_desc, x2017, sbmv_desc, x776,
x578, x449, x632, 1.0E-5));
};
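// Residual merge: accumulate the shortcut output x2017 into the main-path
// buffer x2005 (alpha = beta = 1), then apply ReLU.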
float* x2023 = (float*)myMalloc(1 * sizeof(float));;
x2023[0] = 1.0f;
float* x2025 = (float*)myMalloc(1 * sizeof(float));;
x2025[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2023, bias_desc, x2017, x2025, out_desc, x2005));
};
float* x2028 = (float*)myMalloc(1 * sizeof(float));;
x2028[0] = 0.0f;
float* x2030 = (float*)myMalloc(1 * sizeof(float));;
x2030[0] = 1.0f;
float* x2032 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2030, x_desc, x2005, x2028, x_desc, x2032));
};
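// 2048-channel bottleneck block on 64x2048x2x2 activations:
// 1x1 conv 2048->512, BN, ReLU; 3x3 conv 512->512 (pad 1), BN, ReLU;
// 1x1 conv 512->2048, BN; residual add of the block input x2032; ReLU.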
float* x2034 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2035 = (float*)myMalloc(1 * sizeof(float));;
x2035[0] = 0.0f;
float* x2037 = (float*)myMalloc(1 * sizeof(float));;
x2037[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2037, in_desc, x2032, filt_desc, x902,
conv_desc, algo, ws_data, ws_size,
x2035, out_desc, x2034));
};
float* x2040 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2041 = (float*)myMalloc(1 * sizeof(float));;
x2041[0] = 0.0f;
float* x2043 = (float*)myMalloc(1 * sizeof(float));;
x2043[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2043, x2043, in_desc, x2034, out_desc, x2040, sbmv_desc, x395,
x668, x719, x452, 1.0E-5));
};
float* x2046 = (float*)myMalloc(1 * sizeof(float));;
x2046[0] = 0.0f;
float* x2048 = (float*)myMalloc(1 * sizeof(float));;
x2048[0] = 1.0f;
float* x2050 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2048, x_desc, x2040, x2046, x_desc, x2050));
};
float* x2052 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2053 = (float*)myMalloc(1 * sizeof(float));;
x2053[0] = 0.0f;
float* x2055 = (float*)myMalloc(1 * sizeof(float));;
x2055[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2055, in_desc, x2050, filt_desc, x722,
conv_desc, algo, ws_data, ws_size,
x2053, out_desc, x2052));
};
float* x2058 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2059 = (float*)myMalloc(1 * sizeof(float));;
x2059[0] = 0.0f;
float* x2061 = (float*)myMalloc(1 * sizeof(float));;
x2061[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2061, x2061, in_desc, x2052, out_desc, x2058, sbmv_desc, x737,
x455, x671, x842, 1.0E-5));
};
float* x2064 = (float*)myMalloc(1 * sizeof(float));;
x2064[0] = 0.0f;
float* x2066 = (float*)myMalloc(1 * sizeof(float));;
x2066[0] = 1.0f;
float* x2068 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2066, x_desc, x2058, x2064, x_desc, x2068));
};
float* x2070 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2071 = (float*)myMalloc(1 * sizeof(float));;
x2071[0] = 0.0f;
float* x2073 = (float*)myMalloc(1 * sizeof(float));;
x2073[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2073, in_desc, x2068, filt_desc, x398,
conv_desc, algo, ws_data, ws_size,
x2071, out_desc, x2070));
};
float* x2076 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2077 = (float*)myMalloc(1 * sizeof(float));;
x2077[0] = 0.0f;
float* x2079 = (float*)myMalloc(1 * sizeof(float));;
x2079[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2079, x2079, in_desc, x2070, out_desc, x2076, sbmv_desc, x539,
x689, x461, x992, 1.0E-5));
};
float* x2082 = (float*)myMalloc(1 * sizeof(float));;
x2082[0] = 1.0f;
float* x2084 = (float*)myMalloc(1 * sizeof(float));;
x2084[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2082, bias_desc, x2032, x2084, out_desc, x2076));
};
float* x2087 = (float*)myMalloc(1 * sizeof(float));;
x2087[0] = 0.0f;
float* x2089 = (float*)myMalloc(1 * sizeof(float));;
x2089[0] = 1.0f;
float* x2091 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2089, x_desc, x2076, x2087, x_desc, x2091));
};
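// Next 2048-channel bottleneck block; input x2091 feeds the 1x1 reduce conv and
// the same conv/BN/ReLU pattern continues (its residual add falls after the
// portion shown here).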
float* x2093 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2094 = (float*)myMalloc(1 * sizeof(float));;
x2094[0] = 0.0f;
float* x2096 = (float*)myMalloc(1 * sizeof(float));;
x2096[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2096, in_desc, x2091, filt_desc, x1052,
conv_desc, algo, ws_data, ws_size,
x2094, out_desc, x2093));
};
float* x2099 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2100 = (float*)myMalloc(1 * sizeof(float));;
x2100[0] = 0.0f;
float* x2102 = (float*)myMalloc(1 * sizeof(float));;
x2102[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2102, x2102, in_desc, x2093, out_desc, x2099, sbmv_desc, x302,
x491, x896, x1022, 1.0E-5));
};
float* x2105 = (float*)myMalloc(1 * sizeof(float));;
x2105[0] = 0.0f;
float* x2107 = (float*)myMalloc(1 * sizeof(float));;
x2107[0] = 1.0f;
float* x2109 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2107, x_desc, x2099, x2105, x_desc, x2109));
};
float* x2111 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2112 = (float*)myMalloc(1 * sizeof(float));;
x2112[0] = 0.0f;
float* x2114 = (float*)myMalloc(1 * sizeof(float));;
x2114[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2114, in_desc, x2109, filt_desc, x341,
conv_desc, algo, ws_data, ws_size,
x2112, out_desc, x2111));
};
float* x2117 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x2118 = (float*)myMalloc(1 * sizeof(float));;
x2118[0] = 0.0f;
float* x2120 = (float*)myMalloc(1 * sizeof(float));;
x2120[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2120, x2120, in_desc, x2111, out_desc, x2117, sbmv_desc, x839,
x764, x293, x863, 1.0E-5));
};
float* x2123 = (float*)myMalloc(1 * sizeof(float));;
x2123[0] = 0.0f;
float* x2125 = (float*)myMalloc(1 * sizeof(float));;
x2125[0] = 1.0f;
float* x2127 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2125, x_desc, x2117, x2123, x_desc, x2127));
};
float* x2129 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2130 = (float*)myMalloc(1 * sizeof(float));;
x2130[0] = 0.0f;
float* x2132 = (float*)myMalloc(1 * sizeof(float));;
x2132[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, 2, 2));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2132, in_desc, x2127, filt_desc, x356,
conv_desc, algo, ws_data, ws_size,
x2130, out_desc, x2129));
};
float* x2135 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x2136 = (float*)myMalloc(1 * sizeof(float));;
x2136[0] = 0.0f;
float* x2138 = (float*)myMalloc(1 * sizeof(float));;
x2138[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardInference(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2138, x2138, in_desc, x2129, out_desc, x2135, sbmv_desc, x566,
x800, x1037, x626, 1.0E-5));
};
float* x2141 = (float*)myMalloc(1 * sizeof(float));;
x2141[0] = 1.0f;
float* x2143 = (float*)myMalloc(1 * sizeof(float));;
x2143[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2141, bias_desc, x2091, x2143, out_desc, x2135));
};
float* x2146 = (float*)myMalloc(1 * sizeof(float));;
x2146[0] = 0.0f;
float* x2148 = (float*)myMalloc(1 * sizeof(float));;
x2148[0] = 1.0f;
float* x2150 = (float*)myGpuMalloc(524288 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2148, x_desc, x2135, x2146, x_desc, x2150));
};
float* x2152 = (float*)myMalloc(1 * sizeof(float));;
x2152[0] = 0.0f;
float* x2154 = (float*)myMalloc(1 * sizeof(float));;
x2154[0] = 1.0f;
float* x2156 = (float*)myGpuMalloc(131072 * sizeof(float));
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 2, 2));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, 1, 1));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN,
2, 2, 0,
0, 1, 1
));
CUDNN_CALL(cudnnPoolingForward(
cudnnHandle,
poolingDesc,
x2154, in_desc, x2150, x2152, out_desc, x2156));
};
// resize to WrappedArray(64, 2048)
// gemm: WrappedArray(64, 2048), Vector(10, 2048)
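// The cuBLAS call below is column-major: the 10x2048 weight matrix x938 is passed
// transposed (CUBLAS_OP_T) against the 64x2048 activations x2156 (CUBLAS_OP_N),
// which amounts to out(64,10) = in(64,2048) * W^T; the bias x401 is added afterwards.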
float* x2160 = (float*)myGpuMalloc(640 * sizeof(float));
float* x2161 = (float*)myMalloc(1 * sizeof(float));;
x2161[0] = 0.0f;
float* x2163 = (float*)myMalloc(1 * sizeof(float));;
x2163[0] = 1.0f;
CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, 10,64,2048,x2163,x938,2048,x2156,2048,x2161,x2160,10));
float* x2166 = (float*)myMalloc(1 * sizeof(float));;
x2166[0] = 1.0f;
float* x2168 = (float*)myMalloc(1 * sizeof(float));;
x2168[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 10, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 10, 1, 1));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2166, bias_desc, x401, x2168, out_desc, x2160));
};
// Tensor 'toCPU' invocation.
float* x2172 = (float*)myMalloc(640 * sizeof(float));;
CUDA_CALL(cudaMemcpy(x2172, x2160, 640 * sizeof(float), cudaMemcpyDeviceToHost));
printf("output (size 64 x 10)\n");
float x2175 = 0.0f;
for(int x2177=0; x2177 < 640; x2177++) {
float x2178 = x2175;
float x2179 = x2172[x2177];
float x2180 = fabs(x2179);
float x2181 = fabs(x2178);
bool x2182 = x2180 > x2181;
float x2183;
if (x2182) {
x2183 = x2179;
} else {
x2183 = x2178;
}
x2175 = x2183;
}
float x2187 = x2175;
printf("Max Abs: %.5f || ",x2187);
for(int x2189=0; x2189 < 10; x2189++) {
float x2190 = x2172[x2189];
printf("%.5f ",x2190);
}
printf("\n");
assert(false && "stop");
}
// Backend cleanup.
CUBLAS_CALL(cublasDestroy(cublasHandle));
CUDA_CALL(cudaFree(gpuMallocBase));
CUDNN_CALL(cudnnDestroy(cudnnHandle));
}
/*****************************************
End of C Generated Code
*******************************************/
|
e3a170f4fc391e3dab82101fb84376e1ea984290.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "../../common/book.h"
#include "../../common/cpu_bitmap.h"
#define RAND_MAX 2147483647
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define SPHERES 100
#define DIM 1024
// center of sphere: x, y, z
// sphere radius
// color: r, b, g
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
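    // hit(): for a ray through pixel (ox, oy) travelling along the z axis, returns the
    // z coordinate of the sphere's surface at that pixel (or -INF on a miss) and writes
    // a simple shading factor to *n (1 at the sphere's centre, 0 towards its silhouette).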
__device__ float hit(float ox, float oy, float *n) {
float dx = ox - x;
float dy = oy - y;
if (dx * dx + dy * dy < radius * radius) {
float dz = sqrtf(radius * radius - dx * dx - dy * dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
__constant__ Sphere s[SPHERES];
__global__ void kernel(unsigned char *ptr) {
//threadIdx / blockIdx -> set pixel location
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < SPHERES; i++) {
float n;
float t = s[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
}
}
ptr[offset * 4 + 0] = (int) (r * 255);
ptr[offset * 4 + 1] = (int) (g * 255);
ptr[offset * 4 + 2] = (int) (b * 255);
ptr[offset * 4 + 3] = 255;
}
//Sphere *s;
void EXAMPLE_CONSTMEM_RAYSPHERE_REPO6() {
//capture start time
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
//Allocate GPU memory for output bitmap
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
//Allocate temporary sphere memory and initialize it
Sphere *temp_s = (Sphere *) malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i < SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
//Copy sphere memory from Host to GPU
//remove memory
HANDLE_ERROR(hipMemcpyToSymbol(s, temp_s, sizeof(Sphere) * SPHERES));
free(temp_s);
dim3 blocks(DIM/16, DIM/16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, dev_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
bitmap.display_and_exit();
hipFree(dev_bitmap);
} | e3a170f4fc391e3dab82101fb84376e1ea984290.cu | #include <stdlib.h>
#include "cuda.h"
#include "../../common/book.h"
#include "../../common/cpu_bitmap.h"
#define RAND_MAX 2147483647
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define SPHERES 100
#define DIM 1024
// center of sphere: x, y, z
// sphere radius
// color: r, b, g
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
__device__ float hit(float ox, float oy, float *n) {
float dx = ox - x;
float dy = oy - y;
if (dx * dx + dy * dy < radius * radius) {
float dz = sqrtf(radius * radius - dx * dx - dy * dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
__constant__ Sphere s[SPHERES];
__global__ void kernel(unsigned char *ptr) {
//threadIdx / blockIdx -> set pixel location
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < SPHERES; i++) {
float n;
float t = s[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
}
}
ptr[offset * 4 + 0] = (int) (r * 255);
ptr[offset * 4 + 1] = (int) (g * 255);
ptr[offset * 4 + 2] = (int) (b * 255);
ptr[offset * 4 + 3] = 255;
}
//Sphere *s;
void EXAMPLE_CONSTMEM_RAYSPHERE_REPO6() {
//capture start time
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
//Allocate GPU memory for output bitmap
HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
//Allocate temporary sphere memory and initialize it
Sphere *temp_s = (Sphere *) malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i < SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
//Copy sphere memory from Host to GPU
//remove memory
HANDLE_ERROR(cudaMemcpyToSymbol(s, temp_s, sizeof(Sphere) * SPHERES));
free(temp_s);
dim3 blocks(DIM/16, DIM/16);
dim3 threads(16, 16);
kernel<<<blocks, threads>>> (dev_bitmap);
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
bitmap.display_and_exit();
cudaFree(dev_bitmap);
} |
df1d7d9814bd4d3ad563c911bed786bb3be24396.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/where_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
namespace platform = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
__global__ void WhereCUDAKernel(const int N, const bool* cond, const T* x,
const T* y, T* out) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
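  // Grid-stride loop: each thread advances by blockDim.x * gridDim.x, so the kernel
  // handles any N independent of the launch configuration.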
for (; idx < N; idx += blockDim.x * gridDim.x) {
out[idx] = cond[idx] ? x[idx] : y[idx];
}
}
template <typename T>
__global__ void WhereGradCUDAKernel(const int N, const T* dout,
const bool* cond, T* dx, T* dy) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
if (dx != nullptr) {
dx[idx] = cond[idx] ? dout[idx] : 0.;
}
if (dy != nullptr) {
dy[idx] = cond[idx] ? 0. : dout[idx];
}
}
}
template <typename T>
class WhereKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* condition = context.Input<framework::Tensor>("Condition");
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Input<framework::Tensor>("Y");
auto* out = context.Output<framework::Tensor>("Out");
auto numel = condition->numel();
// TODO(GaaoWei8): Input of where can be broadcast
const bool* cond_data = condition->data<bool>();
const T* x_data = X->data<T>();
const T* y_data = Y->data<T>();
T* out_data = out->mutable_data<T>(context.GetPlace());
auto stream = context.cuda_device_context().stream();
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
auto config = GetGpuLaunchConfig1D(dev_ctx, numel);
hipLaunchKernelGGL(( WhereCUDAKernel<
T>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream,
numel, cond_data, x_data, y_data, out_data);
}
};
template <typename T>
class WhereGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* condition = context.Input<framework::Tensor>("Condition");
const bool* cond_data = condition->data<bool>();
auto numel = condition->numel();
auto* dout_t =
context.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* dx_t = context.Output<framework::Tensor>(framework::GradVarName("X"));
auto* dy_t = context.Output<framework::Tensor>(framework::GradVarName("Y"));
auto* dout = dout_t->data<T>();
T* dx =
(dx_t != nullptr) ? dx_t->mutable_data<T>(context.GetPlace()) : nullptr;
T* dy =
(dy_t != nullptr) ? dy_t->mutable_data<T>(context.GetPlace()) : nullptr;
auto stream = context.cuda_device_context().stream();
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
auto config = GetGpuLaunchConfig1D(dev_ctx, condition->numel());
hipLaunchKernelGGL(( WhereGradCUDAKernel<
T>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream,
numel, dout, cond_data, dx, dy);
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
where, paddle::operators::WhereKernel<platform::CUDADeviceContext, float>,
paddle::operators::WhereKernel<platform::CUDADeviceContext, double>,
paddle::operators::WhereKernel<platform::CUDADeviceContext, int>,
paddle::operators::WhereKernel<platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
where_grad,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, float>,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, double>,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, int>,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, int64_t>);
| df1d7d9814bd4d3ad563c911bed786bb3be24396.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/where_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
namespace platform = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
__global__ void WhereCUDAKernel(const int N, const bool* cond, const T* x,
const T* y, T* out) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
out[idx] = cond[idx] ? x[idx] : y[idx];
}
}
template <typename T>
__global__ void WhereGradCUDAKernel(const int N, const T* dout,
const bool* cond, T* dx, T* dy) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
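  // dx or dy may be nullptr when that gradient is not required; the guards below
  // simply skip the corresponding write.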
for (; idx < N; idx += blockDim.x * gridDim.x) {
if (dx != nullptr) {
dx[idx] = cond[idx] ? dout[idx] : 0.;
}
if (dy != nullptr) {
dy[idx] = cond[idx] ? 0. : dout[idx];
}
}
}
template <typename T>
class WhereKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* condition = context.Input<framework::Tensor>("Condition");
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Input<framework::Tensor>("Y");
auto* out = context.Output<framework::Tensor>("Out");
auto numel = condition->numel();
// TODO(GaaoWei8): Input of where can be broadcast
const bool* cond_data = condition->data<bool>();
const T* x_data = X->data<T>();
const T* y_data = Y->data<T>();
T* out_data = out->mutable_data<T>(context.GetPlace());
auto stream = context.cuda_device_context().stream();
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
auto config = GetGpuLaunchConfig1D(dev_ctx, numel);
WhereCUDAKernel<
T><<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(
numel, cond_data, x_data, y_data, out_data);
}
};
template <typename T>
class WhereGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* condition = context.Input<framework::Tensor>("Condition");
const bool* cond_data = condition->data<bool>();
auto numel = condition->numel();
auto* dout_t =
context.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* dx_t = context.Output<framework::Tensor>(framework::GradVarName("X"));
auto* dy_t = context.Output<framework::Tensor>(framework::GradVarName("Y"));
auto* dout = dout_t->data<T>();
T* dx =
(dx_t != nullptr) ? dx_t->mutable_data<T>(context.GetPlace()) : nullptr;
T* dy =
(dy_t != nullptr) ? dy_t->mutable_data<T>(context.GetPlace()) : nullptr;
auto stream = context.cuda_device_context().stream();
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
auto config = GetGpuLaunchConfig1D(dev_ctx, condition->numel());
WhereGradCUDAKernel<
T><<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(
numel, dout, cond_data, dx, dy);
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
where, paddle::operators::WhereKernel<platform::CUDADeviceContext, float>,
paddle::operators::WhereKernel<platform::CUDADeviceContext, double>,
paddle::operators::WhereKernel<platform::CUDADeviceContext, int>,
paddle::operators::WhereKernel<platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
where_grad,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, float>,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, double>,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, int>,
paddle::operators::WhereGradKernel<platform::CUDADeviceContext, int64_t>);
|
144b449d7ec79545320359934938dfbc2a333534.hip | // !!! This is a file automatically generated by hipify!!!
// Author: Nic Olsen
#include <hip/hip_runtime.h>
#include <iostream>
#include "scan.cuh"
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
// Scans each block of g_idata separately and writes the result to g_odata.
// g_idata and g_odata are arrays available on device of length n
// Writes the sum of each block to lasts[blockIdx.x]
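// Illustrative example (not from the original author): with blockDim.x = 4 and
// g_idata = {3, 1, 7, 0}, the block writes the exclusive scan {0, 3, 4, 11} to
// g_odata and stores the block total 11 in lasts[0], to be added to later blocks.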
__global__ void hillis_steele(float* g_odata, float* lasts, float* g_idata, unsigned int n, bool write_lasts) {
extern volatile __shared__ float s[];
int tid = threadIdx.x;
unsigned int index = blockDim.x * blockIdx.x + tid;
int pout = 0;
int pin = 1;
if (index >= n) {
s[tid] = 0.f;
} else if (tid == 0) {
s[tid] = 0.f;
} else {
s[tid] = g_idata[index - 1];
}
__syncthreads();
for (unsigned int offset = 1; offset < blockDim.x; offset <<= 1) {
pout = 1 - pout;
pin = 1 - pout;
if (tid >= offset) {
s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid] + s[pin * blockDim.x + tid - offset];
} else {
s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid];
}
__syncthreads();
}
if (index < n ) {
g_odata[index] = s[pout * blockDim.x + tid];
printf("g_odata is %f at index %d\n", g_odata[index], index);
}
if (write_lasts && threadIdx.x == 0) {
unsigned int block_end = blockIdx.x * blockDim.x + blockDim.x - 1;
lasts[blockIdx.x] = s[pout * blockDim.x + blockDim.x - 1] + g_idata[block_end];
// printf("Lasts is %f for a %d and block ID is %d and output data is %f\n", lasts[blockIdx.x], gridDim.x, blockIdx.x, g_odata[index]);
}
}
// Increment each element corresponding to block b_i of arr by lasts[b_i]
__global__ void inc_blocks(float* arr, float* lasts, unsigned int n) {
unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < n) {
//printf("arr_before is %f\n", arr[index]);
arr[index] = arr[index] + lasts[blockIdx.x];
//printf("arr_adter is %f\n", arr[index]);
}
}
__host__ void scan( float* in, float* out, unsigned int n, unsigned int threads_per_block) {
    // Scan each block individually
unsigned int nBlocks = (n + threads_per_block - 1) / threads_per_block;
float* lasts;
hipMallocManaged(&lasts, nBlocks * sizeof(float));
unsigned int shmem = 2 * threads_per_block * sizeof(float);
bool write_lasts = true;
// hillis_steele<<<nBlocks, threads_per_block, shmem>>>(out, lasts, in, n, true);
//hipDeviceSynchronize();
for (unsigned int a = n; a > 1; a = (a + threads_per_block - 1) / threads_per_block) {
// void *kernelArgs[] = {
// (void *)&out, (void *)&lasts, (void *)&in, (void *)&n, (void *)&write_lasts
//};
//hipLaunchCooperativeKernel((void*)hillis_steele, nBlocks, threads_per_block, kernelArgs, shmem, 0);
hipLaunchKernelGGL(( hillis_steele), dim3(((a + threads_per_block - 1) / threads_per_block)), dim3(threads_per_block), shmem, 0, out, lasts, in, a, true);
// Swap input and output arrays
hipDeviceSynchronize();
float* tmp = in;
in = lasts;
lasts = tmp;
}
// std::cout << in[a-1] << std::endl;
// Scan lasts
//hillis_steele<<<1, threads_per_block, shmem>>>(lasts, nullptr, lasts, nBlocks, false);
hipDeviceSynchronize();
// Add starting value to each block
hipLaunchKernelGGL(( inc_blocks), dim3(nBlocks), dim3(threads_per_block), 0, 0, out, in, n);
hipDeviceSynchronize();
hipFree(lasts);
}
| 144b449d7ec79545320359934938dfbc2a333534.cu | // Author: Nic Olsen
#include <cuda.h>
#include <iostream>
#include "scan.cuh"
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
// Scans each block of g_idata separately and writes the result to g_odata.
// g_idata and g_odata are arrays available on device of length n
// Writes the sum of each block to lasts[blockIdx.x]
__global__ void hillis_steele(float* g_odata, float* lasts, float* g_idata, unsigned int n, bool write_lasts) {
extern volatile __shared__ float s[];
int tid = threadIdx.x;
unsigned int index = blockDim.x * blockIdx.x + tid;
int pout = 0;
int pin = 1;
if (index >= n) {
s[tid] = 0.f;
} else if (tid == 0) {
s[tid] = 0.f;
} else {
s[tid] = g_idata[index - 1];
}
__syncthreads();
for (unsigned int offset = 1; offset < blockDim.x; offset <<= 1) {
pout = 1 - pout;
pin = 1 - pout;
if (tid >= offset) {
s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid] + s[pin * blockDim.x + tid - offset];
} else {
s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid];
}
__syncthreads();
}
if (index < n ) {
g_odata[index] = s[pout * blockDim.x + tid];
printf("g_odata is %f at index %d\n", g_odata[index], index);
}
if (write_lasts && threadIdx.x == 0) {
unsigned int block_end = blockIdx.x * blockDim.x + blockDim.x - 1;
lasts[blockIdx.x] = s[pout * blockDim.x + blockDim.x - 1] + g_idata[block_end];
// printf("Lasts is %f for a %d and block ID is %d and output data is %f\n", lasts[blockIdx.x], gridDim.x, blockIdx.x, g_odata[index]);
}
}
// Increment each element corresponding to block b_i of arr by lasts[b_i]
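// e.g. if lasts holds the per-block offsets {0, 11, 26}, block 0 is left unchanged,
// every element of block 1 is raised by 11, and every element of block 2 by 26.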
__global__ void inc_blocks(float* arr, float* lasts, unsigned int n) {
unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < n) {
//printf("arr_before is %f\n", arr[index]);
arr[index] = arr[index] + lasts[blockIdx.x];
//printf("arr_adter is %f\n", arr[index]);
}
}
__host__ void scan( float* in, float* out, unsigned int n, unsigned int threads_per_block) {
    // Scan each block individually
unsigned int nBlocks = (n + threads_per_block - 1) / threads_per_block;
float* lasts;
cudaMallocManaged(&lasts, nBlocks * sizeof(float));
unsigned int shmem = 2 * threads_per_block * sizeof(float);
bool write_lasts = true;
// hillis_steele<<<nBlocks, threads_per_block, shmem>>>(out, lasts, in, n, true);
//cudaDeviceSynchronize();
for (unsigned int a = n; a > 1; a = (a + threads_per_block - 1) / threads_per_block) {
// void *kernelArgs[] = {
// (void *)&out, (void *)&lasts, (void *)&in, (void *)&n, (void *)&write_lasts
//};
//cudaLaunchCooperativeKernel((void*)hillis_steele, nBlocks, threads_per_block, kernelArgs, shmem, 0);
hillis_steele<<<((a + threads_per_block - 1) / threads_per_block), threads_per_block, shmem>>>(out, lasts, in, a, true);
// Swap input and output arrays
cudaDeviceSynchronize();
float* tmp = in;
in = lasts;
lasts = tmp;
}
// std::cout << in[a-1] << std::endl;
// Scan lasts
//hillis_steele<<<1, threads_per_block, shmem>>>(lasts, nullptr, lasts, nBlocks, false);
cudaDeviceSynchronize();
// Add starting value to each block
inc_blocks<<<nBlocks, threads_per_block>>>(out, in, n);
cudaDeviceSynchronize();
cudaFree(lasts);
}
|
21e87a7f7b3e39fb9c57bdcd43d080d8b8f072d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// http://www.orangeowlsolutions.com/archives/613
// https://github.com/OrangeOwlSolutions/General-CUDA-programming/wiki/hipMallocPitch-and-hipMemcpy2D
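// The code below carves NN logical Nrows x Ncols matrices out of one pitched
// allocation of Nrows*NN rows; a row is addressed as (float*)((char*)base + row*pitch),
// with pitch measured in bytes.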
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<assert.h>
#define BLOCKSIZE_x 8
#define BLOCKSIZE_y 8
#define BLOCKSIZE_z 8
// x*y*z < 1024
// x*y*z < 1024
// x*y*z < 1024
#define Nrows 3
#define Ncols 5
#define NN 512
//#define NN 8
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int hostPtr, int b){ return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b); }
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/******************/
/* TEST KERNEL 2D */
/******************/
// https://stackoverflow.com/questions/16619274/cuda-griddim-and-blockdim/16619633
// https://stackoverflow.com/questions/16724844/divide-the-work-of-a-3d-kernel-among-blocks
__global__ void test_kernel_3D(float **devPtr, size_t pitch)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int tidz = blockIdx.z*blockDim.z + threadIdx.z;
// printf("%i %i %i\n", tidx, tidy, tidz);
if ((tidx < Ncols) && (tidy < Nrows) && (tidz < NN))
{
float *matrix_a = devPtr[tidz];
float *row_a = (float *)((char*)matrix_a + tidy * pitch);
row_a[tidx] = row_a[tidx] * 2.0;
}
}
__global__ void test_kernel_2D(float *devPtr, size_t pitch)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < Ncols) && (tidy < NN*Nrows))
{
float *row_a = (float *)((char*)devPtr+ tidy * pitch);
row_a[tidx] = row_a[tidx] * 2.0;
}
}
/********/
/* MAIN */
/********/
int main()
{
// float hostPtr[Nrows][Ncols];
float xx = 0.0;
float *hostPtr = (float*)malloc(NN*Nrows*Ncols*sizeof(float));
for(int k=0; k<NN; k++){
for (int i = 0; i < Nrows; i++){
for (int j = 0; j < Ncols; j++) {
                hostPtr[k*Nrows*Ncols+i*Ncols+j] = xx++;
//printf("row %i column %i value %f \n", i, j, hostPtr[i][j]);
}
}
}
// --- 2D pitched allocation and host->device memcopy
float *devPtr;
size_t pitch;
gpuErrchk(hipMallocPitch(&devPtr, &pitch, Ncols * sizeof(float), Nrows*NN));
gpuErrchk(hipMemcpy2D(devPtr, pitch, hostPtr, Ncols*sizeof(float), Ncols*sizeof(float), Nrows*NN, hipMemcpyHostToDevice));
float **ListPtr = 0;
ListPtr = (float**)malloc(NN*sizeof(float*));
for(int i=0; i<NN; i++){
ListPtr[i] = devPtr + pitch/sizeof(float)*Nrows*i;
}
float **devListPtr = 0;
gpuErrchk(hipMalloc(&devListPtr, NN*sizeof(float*)));
gpuErrchk(hipMemcpy(devListPtr, ListPtr, NN*sizeof(float*), hipMemcpyHostToDevice));
// 3D
dim3 gridSize(iDivUp(Ncols, BLOCKSIZE_x), iDivUp(Nrows, BLOCKSIZE_y), iDivUp(NN, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
test_kernel_3D << <gridSize, blockSize >> >(devListPtr, pitch);
// 2D
//dim3 gridSize(iDivUp(Ncols, BLOCKSIZE_x), iDivUp(NN*Nrows, BLOCKSIZE_y));
//dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y);
//test_kernel_2D << <gridSize, blockSize >> >(devPtr, pitch);
hipPeekAtLastError();
hipDeviceSynchronize();
//hipMemcpy2D(hostPtr, Ncols * sizeof(float), devPtr, pitch, Ncols * sizeof(float), Nrows*NN, hipMemcpyDeviceToHost);
gpuErrchk(hipMemcpy2D(hostPtr, Ncols * sizeof(float), devPtr, pitch, Ncols * sizeof(float), Nrows*NN, hipMemcpyDeviceToHost));
for(int k=0; k<NN; k++){
printf("-----iteration: %i-----\n", k);
for (int i = 0; i < Nrows; i++){
for (int j = 0; j < Ncols; j++)
printf("%.1f ", hostPtr[k*Nrows*Ncols+i*Ncols+j]);
// printf("N %i row %i column %i value %f \n", k, i, j, hostPtr[k*Nrows*Ncols+i*Nrows+j]);
printf("\n");
}
printf("-----done!-----\n");
}
return 0;
}
| 21e87a7f7b3e39fb9c57bdcd43d080d8b8f072d8.cu | // http://www.orangeowlsolutions.com/archives/613
// https://github.com/OrangeOwlSolutions/General-CUDA-programming/wiki/cudaMallocPitch-and-cudaMemcpy2D
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<assert.h>
#define BLOCKSIZE_x 8
#define BLOCKSIZE_y 8
#define BLOCKSIZE_z 8
// x*y*z < 1024
// x*y*z < 1024
// x*y*z < 1024
#define Nrows 3
#define Ncols 5
#define NN 512
//#define NN 8
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int hostPtr, int b){ return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b); }
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/******************/
/* TEST KERNEL 2D */
/******************/
// https://stackoverflow.com/questions/16619274/cuda-griddim-and-blockdim/16619633
// https://stackoverflow.com/questions/16724844/divide-the-work-of-a-3d-kernel-among-blocks
__global__ void test_kernel_3D(float **devPtr, size_t pitch)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int tidz = blockIdx.z*blockDim.z + threadIdx.z;
// printf("%i %i %i\n", tidx, tidy, tidz);
if ((tidx < Ncols) && (tidy < Nrows) && (tidz < NN))
{
float *matrix_a = devPtr[tidz];
float *row_a = (float *)((char*)matrix_a + tidy * pitch);
row_a[tidx] = row_a[tidx] * 2.0;
}
}
__global__ void test_kernel_2D(float *devPtr, size_t pitch)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < Ncols) && (tidy < NN*Nrows))
{
float *row_a = (float *)((char*)devPtr+ tidy * pitch);
row_a[tidx] = row_a[tidx] * 2.0;
}
}
/********/
/* MAIN */
/********/
int main()
{
// float hostPtr[Nrows][Ncols];
float xx = 0.0;
float *hostPtr = (float*)malloc(NN*Nrows*Ncols*sizeof(float));
for(int k=0; k<NN; k++){
for (int i = 0; i < Nrows; i++){
for (int j = 0; j < Ncols; j++) {
                hostPtr[k*Nrows*Ncols+i*Ncols+j] = xx++;
//printf("row %i column %i value %f \n", i, j, hostPtr[i][j]);
}
}
}
// --- 2D pitched allocation and host->device memcopy
float *devPtr;
size_t pitch;
gpuErrchk(cudaMallocPitch(&devPtr, &pitch, Ncols * sizeof(float), Nrows*NN));
gpuErrchk(cudaMemcpy2D(devPtr, pitch, hostPtr, Ncols*sizeof(float), Ncols*sizeof(float), Nrows*NN, cudaMemcpyHostToDevice));
float **ListPtr = 0;
ListPtr = (float**)malloc(NN*sizeof(float*));
for(int i=0; i<NN; i++){
ListPtr[i] = devPtr + pitch/sizeof(float)*Nrows*i;
}
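    // Each ListPtr[i] now points at the first row of matrix i inside the single pitched
    // allocation; consecutive matrices are Nrows rows (Nrows * pitch bytes) apart.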
float **devListPtr = 0;
gpuErrchk(cudaMalloc(&devListPtr, NN*sizeof(float*)));
gpuErrchk(cudaMemcpy(devListPtr, ListPtr, NN*sizeof(float*), cudaMemcpyHostToDevice));
// 3D
dim3 gridSize(iDivUp(Ncols, BLOCKSIZE_x), iDivUp(Nrows, BLOCKSIZE_y), iDivUp(NN, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
test_kernel_3D << <gridSize, blockSize >> >(devListPtr, pitch);
// 2D
//dim3 gridSize(iDivUp(Ncols, BLOCKSIZE_x), iDivUp(NN*Nrows, BLOCKSIZE_y));
//dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y);
//test_kernel_2D << <gridSize, blockSize >> >(devPtr, pitch);
cudaPeekAtLastError();
cudaDeviceSynchronize();
//cudaMemcpy2D(hostPtr, Ncols * sizeof(float), devPtr, pitch, Ncols * sizeof(float), Nrows*NN, cudaMemcpyDeviceToHost);
gpuErrchk(cudaMemcpy2D(hostPtr, Ncols * sizeof(float), devPtr, pitch, Ncols * sizeof(float), Nrows*NN, cudaMemcpyDeviceToHost));
for(int k=0; k<NN; k++){
printf("-----iteration: %i-----\n", k);
for (int i = 0; i < Nrows; i++){
for (int j = 0; j < Ncols; j++)
printf("%.1f ", hostPtr[k*Nrows*Ncols+i*Ncols+j]);
// printf("N %i row %i column %i value %f \n", k, i, j, hostPtr[k*Nrows*Ncols+i*Nrows+j]);
printf("\n");
}
printf("-----done!-----\n");
}
return 0;
}
|
a8aab09ccf67a03b045c9a349f6e99aa64058bdd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_bot;
int xdim0_update_halo_kernel2_xvel_plus_4_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_bot;
int ydim0_update_halo_kernel2_xvel_plus_4_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_bot;
int xdim1_update_halo_kernel2_xvel_plus_4_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_bot;
int ydim1_update_halo_kernel2_xvel_plus_4_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_bot * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_bot * \
ydim0_update_halo_kernel2_xvel_plus_4_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_bot * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_bot * \
ydim1_update_halo_kernel2_xvel_plus_4_bot * (z))
// user function
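// Fills the bottom halo: when the matching field flag is set, the value four rows up
// (y-offset 4) is copied into the halo cell at y-offset 0 for xvel0 / xvel1.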
__device__
inline void
update_halo_kernel2_xvel_plus_4_bot(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 4, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_bot *
ydim0_update_halo_kernel2_xvel_plus_4_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_bot *
ydim1_update_halo_kernel2_xvel_plus_4_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_bot(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_4_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 69))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69, "update_halo_kernel2_xvel_plus_4_bot");
OPS_kernels[69].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_bot_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_bot_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_bot_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_bot_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_bot_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_bot_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_bot_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_4_bot), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[69].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| a8aab09ccf67a03b045c9a349f6e99aa64058bdd.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_bot;
int xdim0_update_halo_kernel2_xvel_plus_4_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_bot;
int ydim0_update_halo_kernel2_xvel_plus_4_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_bot;
int xdim1_update_halo_kernel2_xvel_plus_4_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_bot;
int ydim1_update_halo_kernel2_xvel_plus_4_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_bot * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_bot * \
ydim0_update_halo_kernel2_xvel_plus_4_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_bot * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_bot * \
ydim1_update_halo_kernel2_xvel_plus_4_bot * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_4_bot(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 4, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_bot *
ydim0_update_halo_kernel2_xvel_plus_4_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_bot *
ydim1_update_halo_kernel2_xvel_plus_4_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_bot(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_4_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 69))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69, "update_halo_kernel2_xvel_plus_4_bot");
OPS_kernels[69].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_bot_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_bot_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_bot_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_bot_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_bot_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_bot_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_bot_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_4_bot<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[69].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[69].mpi_time += t2 - t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
629458965b2a218277799fa1a4909dc09683bc4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <float.h>
#include <math.h>
struct timeval startwtime, endwtime;
double seq_time;
// CPU variable
int N = 5000; // number of elements
int D = 2; // dimensions
int nT = 500; // number of threads per block
float sigma = 250.0;
float MS_error = 0.0001; // mean shift error
float error = 504820*0.001; // mean value of dataset * 10^(-3)
int iter_limit = 30;
size_t cols;
size_t rows;
//GPU variables
int *gp_N;
int *gp_D;
float *gp_sigma;
size_t gp_pitch;
//CPU matrices
float *x; // initial matrix
float *y; // final matrix
float **val_y; // validation final matrix
float *m; // mean shift vectors
// GPU matrices
float *gp_x;
float *gp_y;
float *gp_y_new;
float *gp_m; // mean shift vectors
float *gp_g; // gaussian
float *gp_numer; // numerator
float *gp_denom; // denominator
__global__ void cuda_mean_shift(void);
void init(void);
void mean_shift(void);
void test(void);
void free_arrays(void);
int main(int argc, char **argv) {
MS_error = MS_error*sigma;
init();
gettimeofday (&startwtime, NULL);
mean_shift();
gettimeofday (&endwtime, NULL);
seq_time = (long double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
printf("KNN wall clock time = %f\n", seq_time);
// Copy matrices to memory
hipMemcpy(y,gp_y, N*D*sizeof(float), hipMemcpyDeviceToHost);
test();
free_arrays();
}
/** procedure init() : initialize the data arrays **/
void init() {
int i,j;
int ret_code=0;
FILE *f;
    // Allocate system memory
x = (float *) malloc(N*D * sizeof(float));
y = (float *) malloc(N*D * sizeof(float));
m = (float *) malloc(N*D * sizeof(float));
val_y = (float **) malloc(N * sizeof(float*));
for (i = 0; i < N; i++) {
val_y[i] = (float *) malloc(D * sizeof(float));
}
// Allocate GPU variables
hipMalloc( (void**)&gp_N , sizeof(int));
hipMalloc( (void**)&gp_D , sizeof(int));
hipMalloc( (void**)&gp_sigma , sizeof(float));
// Allocate GPU matrices
hipMallocPitch(&gp_x, &gp_pitch, N * sizeof(float), D);
hipMallocPitch(&gp_y, &gp_pitch, N * sizeof(float), D);
hipMallocPitch(&gp_y_new, &gp_pitch, N * sizeof(float), D);
hipMallocPitch(&gp_m, &gp_pitch, N * sizeof(float), D);
hipMallocPitch(&gp_g, &gp_pitch, N * sizeof(float), N);
hipMallocPitch(&gp_numer, &gp_pitch, N * sizeof(float), D);
hipMallocPitch(&gp_denom, &gp_pitch, N * sizeof(float), D);
f = fopen("../../data/S_set/S_set_5000x2.txt","r");
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
ret_code = fscanf(f, "%f\t", &x[i*D+j]);
}
ret_code = fscanf(f,"\n");
}
fclose(f);
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
y[i*D+j] = x[i*D+j];
}
}
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
m[i*D+j] =FLT_MAX;
}
}
// Copy variables to GPU
hipMemcpy(gp_N,&N,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(gp_D,&D,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(gp_sigma,&sigma,sizeof(int),hipMemcpyHostToDevice);
// Copy matrices to GPU
hipMemcpy(gp_x,x,N*D*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gp_y,y,N*D*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gp_m,m,N*D*sizeof(float), hipMemcpyHostToDevice);
}
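// One thread per data point: each thread weights every input point x[j] with a
// truncated Gaussian (0 beyond the sigma^2 cutoff, expf(-d/(2*sigma^2)) otherwise,
// +1 for the point itself), forms the weighted mean in s_numer/s_denom, and writes
// the shifted point back to y and its displacement to m.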
__global__ void cuda_mean_shift(int *N,int *D,float *sigma,float *x, float *y, float *y_new,float *m, float *g,float *numer,float *denom,size_t pitch){
int tids = threadIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j,z;
float r;
float dist=0;
__shared__ float s_y[500*2];
__shared__ float s_m[500*2];
__shared__ float s_y_new[500*2];
__shared__ float s_numer[500*2];
__shared__ float s_denom[500*2];
for(z=0;z<*D;z++){
s_y[tids*(*D)+z] = y[tid*(*D)+z];
s_m[tids*(*D)+z] = m[tid*(*D)+z];
s_numer[tids*(*D)+z] = 0;
s_denom[tids*(*D)+z] = 0;
}
__syncthreads();
if(tid<*N){
for(j=0;j<*N;j++){
float *row_g = (float *)((char*)g + j * pitch);
for(z=0;z<*D;z++){
dist += powf((s_y[tids*(*D)+z]-x[j*(*D)+z]),2.0);
}
dist = sqrtf(dist);
if (dist>powf(*sigma,2.0)) row_g[tid]=0;
else row_g[tid] = expf(-dist/(2*powf(*sigma,2.0)));
if (tid==j) row_g[tid] += 1;
dist=0;
for(z=0;z<*D;z++){
s_numer[tids*(*D)+z] += row_g[tid]*x[j*(*D)+z];
s_denom[tids*(*D)+z] +=row_g[tid];
}
__syncthreads();
}
for(z=0;z<*D;z++){
s_y_new[tids*(*D)+z] = s_numer[tids*(*D)+z]/s_denom[tids*(*D)+z];
s_m[tids*(*D)+z] = s_y_new[tids*(*D)+z] - s_y[tids*(*D)+z];
}
for(z=0;z<*D;z++){
s_y[tids*(*D)+z] = s_y_new[tids*(*D)+z];
s_numer[tids*(*D)+z] = 0;
s_denom[tids*(*D)+z] = 0;
}
__syncthreads();
}
for(z=0;z<*D;z++){
y[tid*(*D)+z] = s_y[tids*(*D)+z];
m[tid*(*D)+z] = s_m[tids*(*D)+z];
}
__syncthreads();
}
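/** procedure mean_shift() : launch the kernel repeatedly, stopping when the
    norm of the shift vectors drops below MS_error, stops decreasing, or the
    iteration limit is reached. **/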
void mean_shift() {
int iter = 0;
int i,z;
float er = FLT_MAX;
float last_er = FLT_MAX;
float temp = 0;
while(er > MS_error && iter<iter_limit && last_er >= er) {
last_er = er;
iter++;
er = 0;
hipLaunchKernelGGL(( cuda_mean_shift), dim3(N/nT),dim3(nT), 0, 0, gp_N,gp_D,gp_sigma,gp_x,gp_y,gp_y_new,gp_m,gp_g,gp_numer,gp_denom,gp_pitch);
hipMemcpy(m,gp_m,N*D*sizeof(float), hipMemcpyDeviceToHost);
for(i=0;i<N;i++){
for(z=0;z<D;z++){
temp += pow(m[i*D+z],2);
}
er += temp;
temp = 0;
}
er = sqrt(er);
printf("Iteration = %d, Error = %lf\n",iter,er);
}
}
void test() {
int i,j;
int pass = 1;
int ret_code = 0;
int count = 0;
FILE *f;
f = fopen("../../data/S_set/validation_S_set_5000x2.txt","r");
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
ret_code = fscanf(f, "%f\t", &val_y[i][j]);
}
ret_code = fscanf(f,"\n");
}
fclose(f);
f = fopen("yout.txt","w+");
for (i = 0; i < N; i++) {
for (j = 0; j < D; j++) {
fprintf(f,"%f\t",y[i*D+j]);
}
fprintf(f,"\n");
}
fclose(f);
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
if(fabs(val_y[i][j] - y[i*D+j]) > error){
count++;
pass=0;
}
}
}
printf(" TEST %s\n",(pass) ? "PASSed" : "FAILed");
printf(" Errors = %d\n",count);
}
void free_arrays(){
free(x);
free(y);
	for (int i = 0; i < N; i++) {
		free(val_y[i]);
	}
	free(val_y);
free(m);
hipFree(gp_N);
hipFree(gp_D);
hipFree(gp_sigma);
hipFree(gp_x);
hipFree(gp_y);
hipFree(gp_y_new);
hipFree(gp_m);
hipFree(gp_g);
hipFree(gp_numer);
hipFree(gp_denom);
}
| 629458965b2a218277799fa1a4909dc09683bc4d.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <float.h>
#include <math.h>
struct timeval startwtime, endwtime;
double seq_time;
// CPU variable
int N = 5000; // number of elements
int D = 2; // dimensions
int nT = 500; // number of threads per block
float sigma = 250.0;
float MS_error = 0.0001; // mean shift error
float error = 504820*0.001; // mean value of dataset * 10^(-3)
int iter_limit = 30;
size_t cols;
size_t rows;
//GPU variables
int *gp_N;
int *gp_D;
float *gp_sigma;
size_t gp_pitch;
//CPU matrices
float *x; // initilal matrix
float *y; // final matrix
float **val_y; // validation final matrix
float *m; // mean shift vectrors
// GPU matrices
float *gp_x;
float *gp_y;
float *gp_y_new;
float *gp_m; // mean shift vectrors
float *gp_g; // gaussian
float *gp_numer; // numerator
float *gp_denom; // denominator
__global__ void cuda_mean_shift(void);
void init(void);
void mean_shift(void);
void test(void);
void free_arrays(void);
int main(int argc, char **argv) {
MS_error = MS_error*sigma;
init();
gettimeofday (&startwtime, NULL);
mean_shift();
gettimeofday (&endwtime, NULL);
seq_time = (long double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
	printf("Mean shift wall clock time = %f\n", seq_time);
// Copy matrices to memory
cudaMemcpy(y,gp_y, N*D*sizeof(float), cudaMemcpyDeviceToHost);
test();
free_arrays();
}
/** procedure init() : allocate host and device buffers and load the input data **/
void init() {
int i,j;
int ret_code=0;
FILE *f;
	// Allocate system memory
x = (float *) malloc(N*D * sizeof(float));
y = (float *) malloc(N*D * sizeof(float));
m = (float *) malloc(N*D * sizeof(float));
val_y = (float **) malloc(N * sizeof(float*));
for (i = 0; i < N; i++) {
val_y[i] = (float *) malloc(D * sizeof(float));
}
// Allocate GPU variables
cudaMalloc( (void**)&gp_N , sizeof(int));
cudaMalloc( (void**)&gp_D , sizeof(int));
cudaMalloc( (void**)&gp_sigma , sizeof(float));
// Allocate GPU matrices
cudaMallocPitch(&gp_x, &gp_pitch, N * sizeof(float), D);
cudaMallocPitch(&gp_y, &gp_pitch, N * sizeof(float), D);
cudaMallocPitch(&gp_y_new, &gp_pitch, N * sizeof(float), D);
cudaMallocPitch(&gp_m, &gp_pitch, N * sizeof(float), D);
cudaMallocPitch(&gp_g, &gp_pitch, N * sizeof(float), N);
cudaMallocPitch(&gp_numer, &gp_pitch, N * sizeof(float), D);
cudaMallocPitch(&gp_denom, &gp_pitch, N * sizeof(float), D);
f = fopen("../../data/S_set/S_set_5000x2.txt","r");
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
ret_code = fscanf(f, "%f\t", &x[i*D+j]);
}
ret_code = fscanf(f,"\n");
}
fclose(f);
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
y[i*D+j] = x[i*D+j];
}
}
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
m[i*D+j] =FLT_MAX;
}
}
// Copy variables to GPU
cudaMemcpy(gp_N,&N,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(gp_D,&D,sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(gp_sigma,&sigma,sizeof(float),cudaMemcpyHostToDevice);
// Copy matrices to GPU
cudaMemcpy(gp_x,x,N*D*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gp_y,y,N*D*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gp_m,m,N*D*sizeof(float), cudaMemcpyHostToDevice);
}
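/** kernel cuda_mean_shift() : one thread per data point. Each thread caches its
    current estimate y[tid] in shared memory, accumulates a kernel-weighted
    average of all input points x (the weights are stored in the g matrix), and
    writes back the updated estimate y and the shift vector m = y_new - y. **/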
__global__ void cuda_mean_shift(int *N,int *D,float *sigma,float *x, float *y, float *y_new,float *m, float *g,float *numer,float *denom,size_t pitch){
int tids = threadIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j,z;
float r;
float dist=0;
__shared__ float s_y[500*2];
__shared__ float s_m[500*2];
__shared__ float s_y_new[500*2];
__shared__ float s_numer[500*2];
__shared__ float s_denom[500*2];
for(z=0;z<*D;z++){
s_y[tids*(*D)+z] = y[tid*(*D)+z];
s_m[tids*(*D)+z] = m[tid*(*D)+z];
s_numer[tids*(*D)+z] = 0;
s_denom[tids*(*D)+z] = 0;
}
__syncthreads();
if(tid<*N){
for(j=0;j<*N;j++){
float *row_g = (float *)((char*)g + j * pitch);
for(z=0;z<*D;z++){
dist += powf((s_y[tids*(*D)+z]-x[j*(*D)+z]),2.0);
}
dist = sqrtf(dist);
if (dist>powf(*sigma,2.0)) row_g[tid]=0;
else row_g[tid] = expf(-dist/(2*powf(*sigma,2.0)));
if (tid==j) row_g[tid] += 1;
dist=0;
for(z=0;z<*D;z++){
s_numer[tids*(*D)+z] += row_g[tid]*x[j*(*D)+z];
s_denom[tids*(*D)+z] +=row_g[tid];
}
__syncthreads();
}
for(z=0;z<*D;z++){
s_y_new[tids*(*D)+z] = s_numer[tids*(*D)+z]/s_denom[tids*(*D)+z];
s_m[tids*(*D)+z] = s_y_new[tids*(*D)+z] - s_y[tids*(*D)+z];
}
for(z=0;z<*D;z++){
s_y[tids*(*D)+z] = s_y_new[tids*(*D)+z];
s_numer[tids*(*D)+z] = 0;
s_denom[tids*(*D)+z] = 0;
}
__syncthreads();
}
for(z=0;z<*D;z++){
y[tid*(*D)+z] = s_y[tids*(*D)+z];
m[tid*(*D)+z] = s_m[tids*(*D)+z];
}
__syncthreads();
}
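/** procedure mean_shift() : launch the kernel repeatedly, stopping when the
    norm of the shift vectors drops below MS_error, stops decreasing, or the
    iteration limit is reached. **/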
void mean_shift() {
int iter = 0;
int i,z;
float er = FLT_MAX;
float last_er = FLT_MAX;
float temp = 0;
while(er > MS_error && iter<iter_limit && last_er >= er) {
last_er = er;
iter++;
er = 0;
cuda_mean_shift<<<N/nT,nT>>>(gp_N,gp_D,gp_sigma,gp_x,gp_y,gp_y_new,gp_m,gp_g,gp_numer,gp_denom,gp_pitch);
cudaMemcpy(m,gp_m,N*D*sizeof(float), cudaMemcpyDeviceToHost);
for(i=0;i<N;i++){
for(z=0;z<D;z++){
temp += pow(m[i*D+z],2);
}
er += temp;
temp = 0;
}
er = sqrt(er);
printf("Iteration = %d, Error = %lf\n",iter,er);
}
}
void test() {
int i,j;
int pass = 1;
int ret_code = 0;
int count = 0;
FILE *f;
f = fopen("../../data/S_set/validation_S_set_5000x2.txt","r");
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
ret_code = fscanf(f, "%f\t", &val_y[i][j]);
}
ret_code = fscanf(f,"\n");
}
fclose(f);
f = fopen("yout.txt","w+");
for (i = 0; i < N; i++) {
for (j = 0; j < D; j++) {
fprintf(f,"%f\t",y[i*D+j]);
}
fprintf(f,"\n");
}
fclose(f);
for (i = 0; i < N; ++i) {
for (j = 0; j < D; ++j) {
if(fabs(val_y[i][j] - y[i*D+j]) > error){
count++;
pass=0;
}
}
}
printf(" TEST %s\n",(pass) ? "PASSed" : "FAILed");
printf(" Errors = %d\n",count);
}
void free_arrays(){
free(x);
free(y);
	for (int i = 0; i < N; i++) {
		free(val_y[i]);
	}
	free(val_y);
free(m);
cudaFree(gp_N);
cudaFree(gp_D);
cudaFree(gp_sigma);
cudaFree(gp_x);
cudaFree(gp_y);
cudaFree(gp_y_new);
cudaFree(gp_m);
cudaFree(gp_g);
cudaFree(gp_numer);
cudaFree(gp_denom);
}
|
25bb43985ad91f9fb14729bc7d0d28dee2ab5d93.hip | // !!! This is a file automatically generated by hipify!!!
/*
* svmMain.cu
*
* Created on: May 21, 2012
* Author: Zeyi Wen
*/
#include <iostream>
#include <cassert>
#include <stdio.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include "cvFunction.h"
#include "SVMCmdLineParser.h"
#include "../SharedUtility/initCuda.h"
#include "svmModel.h"
#include "trainClassifier.h"
#include "classifierEvaluater.h"
using std::cout;
using std::endl;
int main(int argc, char **argv)
{
char fileName[1024];
char savedFileName[1024];
SVMCmdLineParser parser;
parser.ParseLine(argc, argv, fileName, savedFileName);
hipCtx_t context;
if(!InitCUDA(context, 'G'))
return 0;
printf("CUDA initialized.\n");
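	// Dispatch on the requested task: 0 = train, 1 = cross-validation,
	// 2 = train then evaluate, 3 = grid search, 4 = select the best C,
	// 5 = one-vs-all training and evaluation.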
if(parser.task_type == 0){
//perform svm training
cout << "performing training" << endl;
SvmModel model;
trainSVM(parser.param, fileName, parser.numFeature, model, parser.compute_training_error);
}else if(parser.task_type == 1){
		//perform cross-validation
cout << "performing cross-validation" << endl;
crossValidation(parser.param, fileName);
}else if(parser.task_type == 2){
//perform svm evaluation
cout << "performing evaluation" << endl;
SvmModel model;
cout << "start training..." << endl;
string name=fileName;
name="./result/"+name;
ofstream ofs(name.c_str(),ios::app);
if(!ofs.is_open()){
cout<<"open ./result/ error "<<name<<endl;
return 0;
}
ofs<<"OVA"<<fileName<<"\n";
ofs<<"g "<<parser.param.gamma<<"C"<<parser.param.C<<"\n";
trainSVM(parser.param, fileName, parser.numFeature, model, parser.compute_training_error);
cout << "start evaluation..." << endl;
evaluateSVMClassifier(model, parser.testSetName, parser.numFeature);
ofs<<"\n";
ofs.close();
}else if(parser.task_type == 4){
//perform selecting best C
cout << "perform C selection" << endl;
vector<real> vC;
for(int i = 0; i < 3; i++){
SvmModel model;
model.vC = vC;
cout << "start training..." << endl;
trainSVM(parser.param, fileName, parser.numFeature, model, parser.compute_training_error);
cout << "start evaluation..." << endl;
//evaluateSVMClassifier(model, strcat(fileName, ".t"), parser.numFeature);
evaluateSVMClassifier(model, fileName, parser.numFeature);
vC = ClassifierEvaluater::updateC(model.vC);
}
}
else if(parser.task_type == 3){
cout << "performing grid search" << endl;
Grid paramGrid;
paramGrid.vfC.push_back(parser.param.C);
paramGrid.vfGamma.push_back(parser.param.gamma);
gridSearch(paramGrid, fileName);
}else if(parser.task_type == 5){
//perform svm evaluation
cout << "performing evaluation" << endl;
cout << "start training..." << endl;
string name=fileName;
name="./result/"+name;
ofstream ofs(name.c_str(),ios::app);
if(!ofs.is_open()){
cout<<"open ./result/ error "<<name<<endl;
return 0;
}
ofs<<"OVA"<<fileName<<"\n";
//double gamma=parser.param.gamma;
//double C=parser.param.C;
//for(int grid=1; grid<3;grid*=2){
// parser.param.gamma=gamma*grid*2;
// for(int grid2=1;grid2<17;grid2*=2){
// parser.param.C=C*grid2*2;
ofs<<"g "<<parser.param.gamma<<"C"<<parser.param.C<<"\n";
trainOVASVM(parser.param, fileName, parser.numFeature, parser.compute_training_error, parser.testSetName, ofs);
ofs<<"\n";
//}
// }
ofs.close();
}else{
cout << "unknown task type" << endl;
}
return 0;
}
| 25bb43985ad91f9fb14729bc7d0d28dee2ab5d93.cu | /*
* svmMain.cu
*
* Created on: May 21, 2012
* Author: Zeyi Wen
*/
#include <iostream>
#include <cassert>
#include <stdio.h>
#include <helper_cuda.h>
#include <cuda.h>
#include "cvFunction.h"
#include "SVMCmdLineParser.h"
#include "../SharedUtility/initCuda.h"
#include "svmModel.h"
#include "trainClassifier.h"
#include "classifierEvaluater.h"
using std::cout;
using std::endl;
int main(int argc, char **argv)
{
char fileName[1024];
char savedFileName[1024];
SVMCmdLineParser parser;
parser.ParseLine(argc, argv, fileName, savedFileName);
CUcontext context;
if(!InitCUDA(context, 'G'))
return 0;
printf("CUDA initialized.\n");
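	// Dispatch on the requested task: 0 = train, 1 = cross-validation,
	// 2 = train then evaluate, 3 = grid search, 4 = select the best C,
	// 5 = one-vs-all training and evaluation.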
if(parser.task_type == 0){
//perform svm training
cout << "performing training" << endl;
SvmModel model;
trainSVM(parser.param, fileName, parser.numFeature, model, parser.compute_training_error);
}else if(parser.task_type == 1){
		//perform cross-validation
cout << "performing cross-validation" << endl;
crossValidation(parser.param, fileName);
}else if(parser.task_type == 2){
//perform svm evaluation
cout << "performing evaluation" << endl;
SvmModel model;
cout << "start training..." << endl;
string name=fileName;
name="./result/"+name;
ofstream ofs(name.c_str(),ios::app);
if(!ofs.is_open()){
cout<<"open ./result/ error "<<name<<endl;
return 0;
}
ofs<<"OVA"<<fileName<<"\n";
ofs<<"g "<<parser.param.gamma<<"C"<<parser.param.C<<"\n";
trainSVM(parser.param, fileName, parser.numFeature, model, parser.compute_training_error);
cout << "start evaluation..." << endl;
evaluateSVMClassifier(model, parser.testSetName, parser.numFeature);
ofs<<"\n";
ofs.close();
}else if(parser.task_type == 4){
//perform selecting best C
cout << "perform C selection" << endl;
vector<real> vC;
for(int i = 0; i < 3; i++){
SvmModel model;
model.vC = vC;
cout << "start training..." << endl;
trainSVM(parser.param, fileName, parser.numFeature, model, parser.compute_training_error);
cout << "start evaluation..." << endl;
//evaluateSVMClassifier(model, strcat(fileName, ".t"), parser.numFeature);
evaluateSVMClassifier(model, fileName, parser.numFeature);
vC = ClassifierEvaluater::updateC(model.vC);
}
}
else if(parser.task_type == 3){
cout << "performing grid search" << endl;
Grid paramGrid;
paramGrid.vfC.push_back(parser.param.C);
paramGrid.vfGamma.push_back(parser.param.gamma);
gridSearch(paramGrid, fileName);
}else if(parser.task_type == 5){
//perform svm evaluation
cout << "performing evaluation" << endl;
cout << "start training..." << endl;
string name=fileName;
name="./result/"+name;
ofstream ofs(name.c_str(),ios::app);
if(!ofs.is_open()){
cout<<"open ./result/ error "<<name<<endl;
return 0;
}
ofs<<"OVA"<<fileName<<"\n";
//double gamma=parser.param.gamma;
//double C=parser.param.C;
//for(int grid=1; grid<3;grid*=2){
// parser.param.gamma=gamma*grid*2;
// for(int grid2=1;grid2<17;grid2*=2){
// parser.param.C=C*grid2*2;
ofs<<"g "<<parser.param.gamma<<"C"<<parser.param.C<<"\n";
trainOVASVM(parser.param, fileName, parser.numFeature, parser.compute_training_error, parser.testSetName, ofs);
ofs<<"\n";
//}
// }
ofs.close();
}else{
cout << "unknown task type" << endl;
}
return 0;
}
|
040d76c96206f0ff1e1ed5a6ccbd98379d762395.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./brute_kernel.h"
#define BLOCK 16
typedef struct sites_type sites_t;
__device__
float calc_dist(int x1, int x2, int y1, int y2)
{
return sqrtf(powf(x1-x2,2) + powf(y1-y2,2));
}
__global__
void brute_kernel(sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// uchar4 black = make_uchar4(0, 0, 0, 255);
if (y < nrows && x < ncols)
{
int index = y*ncols + x;
float min_dist = 99999999;
int min_dist_site = -1;
for (int s = 0; s < nsites; ++s)
{
float current_dist = calc_dist(x, sites[s].x, y, sites[s].y);
/*if (current_dist == min_dist)
{
min_dist_site = -1;
out_pix[index] = black;
}*/
/*else*/ if (current_dist < min_dist)
{
min_dist = current_dist;
min_dist_site = s;
}
}
//if (min_dist_site != -1)
//{
out_pix[index] = sites[min_dist_site].color;
//}
}
}
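// Shared-memory variant of brute_kernel: the thread block stages the site list
// in tiles of 32 sites, and each pixel scans every tile from shared memory
// instead of re-reading global memory.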
__global__
void s_brute_kernel(const sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
const int shared_size = 32;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
uchar4 my_color;
__shared__ sites_t s_sites[shared_size];
	// Every thread in the block must reach the __syncthreads() barriers below,
	// so the bounds test only guards the per-pixel work, never the barriers.
	bool in_bounds = (y < nrows && x < ncols);
	int global_index = y*ncols + x;
	int local_index = threadIdx.y*blockDim.x + threadIdx.x;
	float min_dist = 99999999;
	for (int s = 0; s < nsites; s += shared_size)
	{
		// load a tile of sites into shared memory
		if (local_index < shared_size && local_index+s < nsites)
		{
			s_sites[local_index] = sites[local_index+s];
		}
		__syncthreads();
		if (in_bounds)
		{
			for (int i = 0; i < shared_size; ++i)
			{
				// only run for valid sites
				if (s+i >= nsites)
				{
					break;
				}
				float current_dist = calc_dist(x, s_sites[i].x, y, s_sites[i].y);
				if (current_dist < min_dist)
				{
					min_dist = current_dist;
					my_color = s_sites[i].color;
				}
			}
		}
		__syncthreads();
	}
	if (in_bounds)
	{
		out_pix[global_index] = my_color;
	}
}
// color the sites of the diagram black
__global__
void color_sites(sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < nsites)
{
uchar4 black = make_uchar4(0, 0, 0, 255);
for (int r = -2; r <= 2; ++r)
{
for (int c = -2; c <= 2; ++c)
{
// make the sites appear bigger than 1 pixel
if (sites[i].y + r < 0 || sites[i].y + r >= nrows ||
sites[i].x + c < 0 || sites[i].x + c >= ncols)
{
continue;
}
out_pix[(sites[i].y+r)*ncols + (sites[i].x+c)] = black;
}
}
}
}
void launch_kernel(sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
dim3 block_dim(BLOCK,BLOCK,1);
dim3 grid_dim((ncols-1)/BLOCK+1,(nrows-1)/BLOCK+1,1);
hipLaunchKernelGGL(( s_brute_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, sites, out_pix, nsites, nrows, ncols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( color_sites), dim3(nsites/256+1), dim3(256), 0, 0, sites, out_pix, nsites, nrows, ncols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| 040d76c96206f0ff1e1ed5a6ccbd98379d762395.cu | #include "./brute_kernel.h"
#define BLOCK 16
typedef struct sites_type sites_t;
__device__
float calc_dist(int x1, int x2, int y1, int y2)
{
return sqrtf(powf(x1-x2,2) + powf(y1-y2,2));
}
__global__
void brute_kernel(sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// uchar4 black = make_uchar4(0, 0, 0, 255);
if (y < nrows && x < ncols)
{
int index = y*ncols + x;
float min_dist = 99999999;
int min_dist_site = -1;
for (int s = 0; s < nsites; ++s)
{
float current_dist = calc_dist(x, sites[s].x, y, sites[s].y);
/*if (current_dist == min_dist)
{
min_dist_site = -1;
out_pix[index] = black;
}*/
/*else*/ if (current_dist < min_dist)
{
min_dist = current_dist;
min_dist_site = s;
}
}
//if (min_dist_site != -1)
//{
out_pix[index] = sites[min_dist_site].color;
//}
}
}
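// Shared-memory variant of brute_kernel: the thread block stages the site list
// in tiles of 32 sites, and each pixel scans every tile from shared memory
// instead of re-reading global memory.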
__global__
void s_brute_kernel(const sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
const int shared_size = 32;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
uchar4 my_color;
__shared__ sites_t s_sites[shared_size];
	// Every thread in the block must reach the __syncthreads() barriers below,
	// so the bounds test only guards the per-pixel work, never the barriers.
	bool in_bounds = (y < nrows && x < ncols);
	int global_index = y*ncols + x;
	int local_index = threadIdx.y*blockDim.x + threadIdx.x;
	float min_dist = 99999999;
	for (int s = 0; s < nsites; s += shared_size)
	{
		// load a tile of sites into shared memory
		if (local_index < shared_size && local_index+s < nsites)
		{
			s_sites[local_index] = sites[local_index+s];
		}
		__syncthreads();
		if (in_bounds)
		{
			for (int i = 0; i < shared_size; ++i)
			{
				// only run for valid sites
				if (s+i >= nsites)
				{
					break;
				}
				float current_dist = calc_dist(x, s_sites[i].x, y, s_sites[i].y);
				if (current_dist < min_dist)
				{
					min_dist = current_dist;
					my_color = s_sites[i].color;
				}
			}
		}
		__syncthreads();
	}
	if (in_bounds)
	{
		out_pix[global_index] = my_color;
	}
}
// color the sites of the diagram black
__global__
void color_sites(sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < nsites)
{
uchar4 black = make_uchar4(0, 0, 0, 255);
for (int r = -2; r <= 2; ++r)
{
for (int c = -2; c <= 2; ++c)
{
// make the sites appear bigger than 1 pixel
if (sites[i].y + r < 0 || sites[i].y + r >= nrows ||
sites[i].x + c < 0 || sites[i].x + c >= ncols)
{
continue;
}
out_pix[(sites[i].y+r)*ncols + (sites[i].x+c)] = black;
}
}
}
}
void launch_kernel(sites_t* sites, uchar4* out_pix, const int nsites, const int nrows, const int ncols)
{
dim3 block_dim(BLOCK,BLOCK,1);
dim3 grid_dim((ncols-1)/BLOCK+1,(nrows-1)/BLOCK+1,1);
s_brute_kernel<<<grid_dim, block_dim>>>(sites, out_pix, nsites, nrows, ncols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
color_sites<<<nsites/256+1, 256>>>(sites, out_pix, nsites, nrows, ncols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
55ae3c85449c0e6d8ac4dbceff7a658447d2ca5a.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_HIP)
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/dynload/nvjpeg.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/stream/cuda_stream.h"
namespace paddle {
namespace operators {
static hipStream_t nvjpeg_stream = nullptr;
static nvjpegHandle_t nvjpeg_handle = nullptr;
void InitNvjpegImage(nvjpegImage_t* img) {
for (int c = 0; c < NVJPEG_MAX_COMPONENT; c++) {
img->channel[c] = nullptr;
img->pitch[c] = 0;
}
}
template <typename T>
class GPUDecodeJpegKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// Create nvJPEG handle
if (nvjpeg_handle == nullptr) {
nvjpegStatus_t create_status =
platform::dynload::nvjpegCreateSimple(&nvjpeg_handle);
PADDLE_ENFORCE_EQ(create_status, NVJPEG_STATUS_SUCCESS,
platform::errors::Fatal("nvjpegCreateSimple failed: ",
create_status));
}
nvjpegJpegState_t nvjpeg_state;
nvjpegStatus_t state_status =
platform::dynload::nvjpegJpegStateCreate(nvjpeg_handle, &nvjpeg_state);
PADDLE_ENFORCE_EQ(state_status, NVJPEG_STATUS_SUCCESS,
platform::errors::Fatal("nvjpegJpegStateCreate failed: ",
state_status));
int components;
nvjpegChromaSubsampling_t subsampling;
int widths[NVJPEG_MAX_COMPONENT];
int heights[NVJPEG_MAX_COMPONENT];
auto* x = ctx.Input<framework::Tensor>("X");
auto* x_data = x->data<T>();
nvjpegStatus_t info_status = platform::dynload::nvjpegGetImageInfo(
nvjpeg_handle, x_data, (size_t)x->numel(), &components, &subsampling,
widths, heights);
PADDLE_ENFORCE_EQ(
info_status, NVJPEG_STATUS_SUCCESS,
platform::errors::Fatal("nvjpegGetImageInfo failed: ", info_status));
int width = widths[0];
int height = heights[0];
nvjpegOutputFormat_t output_format;
int output_components;
auto mode = ctx.Attr<std::string>("mode");
if (mode == "unchanged") {
if (components == 1) {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (components == 3) {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
platform::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(platform::errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
} else if (mode == "gray") {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (mode == "rgb") {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
platform::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(platform::errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
nvjpegImage_t out_image;
InitNvjpegImage(&out_image);
// create nvjpeg stream
if (nvjpeg_stream == nullptr) {
hipStreamCreateWithFlags(&nvjpeg_stream, hipStreamNonBlocking);
}
int sz = widths[0] * heights[0];
auto* out = ctx.Output<framework::LoDTensor>("Out");
std::vector<int64_t> out_shape = {output_components, height, width};
out->Resize(phi::make_ddim(out_shape));
T* data = out->mutable_data<T>(ctx.GetPlace());
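    // Point each nvJPEG output channel at the corresponding plane of the
    // output tensor so the decoder writes directly into it.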
for (int c = 0; c < output_components; c++) {
out_image.channel[c] = data + c * sz;
out_image.pitch[c] = width;
}
    nvjpegStatus_t decode_status = platform::dynload::nvjpegDecode(
        nvjpeg_handle, nvjpeg_state, x_data, x->numel(), output_format,
        &out_image, nvjpeg_stream);
    PADDLE_ENFORCE_EQ(
        decode_status, NVJPEG_STATUS_SUCCESS,
        platform::errors::Fatal("nvjpegDecode failed: ", decode_status));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(decode_jpeg, ops::GPUDecodeJpegKernel<uint8_t>)
#endif
| 55ae3c85449c0e6d8ac4dbceff7a658447d2ca5a.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_HIP)
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/dynload/nvjpeg.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/stream/cuda_stream.h"
namespace paddle {
namespace operators {
static cudaStream_t nvjpeg_stream = nullptr;
static nvjpegHandle_t nvjpeg_handle = nullptr;
void InitNvjpegImage(nvjpegImage_t* img) {
for (int c = 0; c < NVJPEG_MAX_COMPONENT; c++) {
img->channel[c] = nullptr;
img->pitch[c] = 0;
}
}
template <typename T>
class GPUDecodeJpegKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// Create nvJPEG handle
if (nvjpeg_handle == nullptr) {
nvjpegStatus_t create_status =
platform::dynload::nvjpegCreateSimple(&nvjpeg_handle);
PADDLE_ENFORCE_EQ(create_status, NVJPEG_STATUS_SUCCESS,
platform::errors::Fatal("nvjpegCreateSimple failed: ",
create_status));
}
nvjpegJpegState_t nvjpeg_state;
nvjpegStatus_t state_status =
platform::dynload::nvjpegJpegStateCreate(nvjpeg_handle, &nvjpeg_state);
PADDLE_ENFORCE_EQ(state_status, NVJPEG_STATUS_SUCCESS,
platform::errors::Fatal("nvjpegJpegStateCreate failed: ",
state_status));
int components;
nvjpegChromaSubsampling_t subsampling;
int widths[NVJPEG_MAX_COMPONENT];
int heights[NVJPEG_MAX_COMPONENT];
auto* x = ctx.Input<framework::Tensor>("X");
auto* x_data = x->data<T>();
nvjpegStatus_t info_status = platform::dynload::nvjpegGetImageInfo(
nvjpeg_handle, x_data, (size_t)x->numel(), &components, &subsampling,
widths, heights);
PADDLE_ENFORCE_EQ(
info_status, NVJPEG_STATUS_SUCCESS,
platform::errors::Fatal("nvjpegGetImageInfo failed: ", info_status));
int width = widths[0];
int height = heights[0];
nvjpegOutputFormat_t output_format;
int output_components;
auto mode = ctx.Attr<std::string>("mode");
if (mode == "unchanged") {
if (components == 1) {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (components == 3) {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
platform::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(platform::errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
} else if (mode == "gray") {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (mode == "rgb") {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
platform::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(platform::errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
nvjpegImage_t out_image;
InitNvjpegImage(&out_image);
// create nvjpeg stream
if (nvjpeg_stream == nullptr) {
cudaStreamCreateWithFlags(&nvjpeg_stream, cudaStreamNonBlocking);
}
int sz = widths[0] * heights[0];
auto* out = ctx.Output<framework::LoDTensor>("Out");
std::vector<int64_t> out_shape = {output_components, height, width};
out->Resize(phi::make_ddim(out_shape));
T* data = out->mutable_data<T>(ctx.GetPlace());
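    // Point each nvJPEG output channel at the corresponding plane of the
    // output tensor so the decoder writes directly into it.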
for (int c = 0; c < output_components; c++) {
out_image.channel[c] = data + c * sz;
out_image.pitch[c] = width;
}
    nvjpegStatus_t decode_status = platform::dynload::nvjpegDecode(
        nvjpeg_handle, nvjpeg_state, x_data, x->numel(), output_format,
        &out_image, nvjpeg_stream);
    PADDLE_ENFORCE_EQ(
        decode_status, NVJPEG_STATUS_SUCCESS,
        platform::errors::Fatal("nvjpegDecode failed: ", decode_status));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(decode_jpeg, ops::GPUDecodeJpegKernel<uint8_t>)
#endif
|
c406d591cc7286609c056defabd1eedebcccb2ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <memory>
namespace cudf {
namespace detail {
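// Functor dispatched on the gather map's element type: integral (non-bool)
// maps perform the gather, optionally verifying that every index lies within
// the source table's row bounds; any other element type raises an error.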
struct dispatch_map_type {
template <typename map_type,
std::enable_if_t<std::is_integral<map_type>::value and
not std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(
table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows,
bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
std::unique_ptr<table> destination_table;
if (check_bounds) {
cudf::size_type begin = (allow_negative_indices) ? -source_table.num_rows() : 0;
CUDF_EXPECTS(num_destination_rows ==
thrust::count_if(rmm::exec_policy()->on(0),
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
bounds_checker<map_type>{begin, source_table.num_rows()}),
"Index out of bounds.");
}
if (allow_negative_indices) {
destination_table =
gather(source_table,
thrust::make_transform_iterator(gather_map.begin<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
thrust::make_transform_iterator(gather_map.end<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
ignore_out_of_bounds,
mr,
stream);
} else {
destination_table = gather(source_table,
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
ignore_out_of_bounds,
mr,
stream);
}
return destination_table;
}
template <typename map_type,
std::enable_if_t<not std::is_integral<map_type>::value or
std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(
table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows,
bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
CUDF_FAIL("Gather map must be an integral type.");
}
};
std::unique_ptr<table> gather(table_view const& source_table,
column_view const& gather_map,
bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls");
std::unique_ptr<table> destination_table = cudf::type_dispatcher(gather_map.type(),
dispatch_map_type{},
source_table,
gather_map,
gather_map.size(),
check_bounds,
ignore_out_of_bounds,
allow_negative_indices,
mr,
stream);
return destination_table;
}
} // namespace detail
std::unique_ptr<table> gather(table_view const& source_table,
column_view const& gather_map,
bool check_bounds,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::gather(source_table, gather_map, check_bounds, false, true, mr);
}
} // namespace cudf
| c406d591cc7286609c056defabd1eedebcccb2ae.cu | #include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <memory>
namespace cudf {
namespace detail {
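// Functor dispatched on the gather map's element type: integral (non-bool)
// maps perform the gather, optionally verifying that every index lies within
// the source table's row bounds; any other element type raises an error.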
struct dispatch_map_type {
template <typename map_type,
std::enable_if_t<std::is_integral<map_type>::value and
not std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(
table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows,
bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
std::unique_ptr<table> destination_table;
if (check_bounds) {
cudf::size_type begin = (allow_negative_indices) ? -source_table.num_rows() : 0;
CUDF_EXPECTS(num_destination_rows ==
thrust::count_if(rmm::exec_policy()->on(0),
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
bounds_checker<map_type>{begin, source_table.num_rows()}),
"Index out of bounds.");
}
if (allow_negative_indices) {
destination_table =
gather(source_table,
thrust::make_transform_iterator(gather_map.begin<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
thrust::make_transform_iterator(gather_map.end<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
ignore_out_of_bounds,
mr,
stream);
} else {
destination_table = gather(source_table,
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
ignore_out_of_bounds,
mr,
stream);
}
return destination_table;
}
template <typename map_type,
std::enable_if_t<not std::is_integral<map_type>::value or
std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(
table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows,
bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
CUDF_FAIL("Gather map must be an integral type.");
}
};
std::unique_ptr<table> gather(table_view const& source_table,
column_view const& gather_map,
bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls");
std::unique_ptr<table> destination_table = cudf::type_dispatcher(gather_map.type(),
dispatch_map_type{},
source_table,
gather_map,
gather_map.size(),
check_bounds,
ignore_out_of_bounds,
allow_negative_indices,
mr,
stream);
return destination_table;
}
} // namespace detail
std::unique_ptr<table> gather(table_view const& source_table,
column_view const& gather_map,
bool check_bounds,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::gather(source_table, gather_map, check_bounds, false, true, mr);
}
} // namespace cudf
|
8a29407f0d3be7f71bf8ad29f4e29faf3202f5c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//: nvcc add0.cu -o add0
#include <stdlib.h>
#include <stdio.h>
__global__ void cuda_add(int a, int b, int *c)
{
*c = a + b;
}
int main(int argc, char **argv)
{
int c;
int *dev_c;
hipMalloc((void**)&dev_c, sizeof(int));
hipLaunchKernelGGL(( cuda_add), dim3(1),dim3(1), 0, 0, 2, 2, dev_c);
/*
	 * Arguments for hipMemcpy
	 * 1 : destination
	 * 2 : memory on the device
	 * 3 : size of the block
	 * 4 : direction of the copy
*/
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("Almighty CUDA's answer: 2 + 2 = %d.\n", c);
hipFree(dev_c);
return EXIT_SUCCESS;
}
| 8a29407f0d3be7f71bf8ad29f4e29faf3202f5c1.cu | //: nvcc add0.cu -o add0
#include <stdlib.h>
#include <stdio.h>
__global__ void cuda_add(int a, int b, int *c)
{
*c = a + b;
}
int main(int argc, char **argv)
{
int c;
int *dev_c;
cudaMalloc((void**)&dev_c, sizeof(int));
cuda_add<<<1,1>>>(2, 2, dev_c);
/*
	 * Arguments for cudaMemcpy
	 * 1 : destination
	 * 2 : memory on the device
	 * 3 : size of the block
	 * 4 : direction of the copy
*/
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("Almighty CUDA's answer: 2 + 2 = %d.\n", c);
cudaFree(dev_c);
return EXIT_SUCCESS;
}
|
ac0e8376dbfc124b1650177f920ec90a51bc970d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nparticle;
int* partType;
float* partX;
float* partY;
float* partZ;
float* partQ;
float* Etot;
};
typedef struct GpuConstantsPackage cribSheet;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ cribSheet cSh;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
  int IsPinned;    // "Pinned" memory is best for Host <=> GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredFloat: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
  int IsPinned;    // "Pinned" memory is best for Host <=> GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// ParticleSimulator: run a rudimentary simulation of particles
//-----------------------------------------------------------------------------
__global__ void ParticleSimulator()
{
// Show that the device understands the number of particles
if (threadIdx.x == 0) {
printf("There are %d particles active.\n", cSh.nparticle);
}
// Loop over all atoms to report positions. This will NOT necessarily
// report all particles in orer 0, 1, 2, ..., N. The second warp
// (threadIdx.x >= 32 and threadIdx.x < 64) may fire off before the first,
// printing particles 32, 33, ... before 0, 1, 2, and so forth. Race
// conditions are mitigable with good programming practices to insulate
// code against thread access before results are ready.
int tidx = threadIdx.x;
while (tidx < cSh.nparticle) {
printf("Particle %3d: %9.4f %9.4f %9.4f with %9.4f charge\n", tidx,
cSh.partX[tidx], cSh.partY[tidx], cSh.partZ[tidx], cSh.partQ[tidx]);
tidx += blockDim.x;
}
// Loop over all particles and compute the electrostatic potential.
// Each thread will accumulate its own portion of the potential,
// then pool the results at the end.
tidx = threadIdx.x;
float qq = 0.0;
while (tidx < cSh.nparticle) {
// Naive way: each thread takes a particle and then loops over all
// other particles, up to but not including itself. Low-numbered
// threads will have little work to do while high-numbered threads
// will cause the rest of the thread block to idle.
int i;
for (i = 0; i < tidx; i++) {
float dx = cSh.partX[tidx] - cSh.partX[i];
float dy = cSh.partY[tidx] - cSh.partY[i];
float dz = cSh.partZ[tidx] - cSh.partZ[i];
float r = sqrt(dx*dx + dy*dy + dz*dz);
qq += cSh.partQ[tidx] * cSh.partQ[i] / r;
}
// Advance the counter by the number of threads in the block.
// The kernel of 64 threads processes particles 0-63, 64-127,
// 128-191, etc. until reaching nparticle.
tidx += blockDim.x;
}
// Each thread's qq could contain information pertaining to multiple atoms
// each interacting with their respective pairs. That's fine--the goal is
// to compute a total potential energy, not anything for one atom in
// particular.
atomicAdd(&cSh.Etot[0], qq);
// For edification, let's try that without atomic instructions: every
// thread simply accumulates to a number without regard to whether
// other threads are trying to edit the same number.
cSh.Etot[1] += qq;
}
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(int),
hipHostMallocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
hipMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
hipMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
            hipMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//          for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(float),
hipHostMallocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
hipMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
hipMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
  hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
            hipMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, np;
gpuInt particleTypes;
gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge;
gpuFloat etot;
// Create a small array of particles and populate it
particleTypes = CreateGpuInt(1000, 1);
particleXcoord = CreateGpuFloat(1000, 1);
particleYcoord = CreateGpuFloat(1000, 1);
particleZcoord = CreateGpuFloat(1000, 1);
particleCharge = CreateGpuFloat(1000, 1);
// Allocate and initialize the total energy
// accumulator on the host and on the device.
etot = CreateGpuFloat(2, 1);
etot.HostData[0] = 0.0;
etot.HostData[1] = 0.0;
UploadGpuFloat(&etot);
// Initialize random number generator. srand() SEEDS the generator,
// thereafter each call to rand() will return a different number.
  // This is a really bad generator (much better methods with longer
// periods before they start looping back over the same sequence are
// available).
srand(62052);
// We have allocated for a maximum of 100 particles,
// but let's say there are only 47 in this case.
np = 47;
for (i = 0; i < np; i++) {
// Integer truncation would happen anyway, I'm just making it explicit
    particleTypes.HostData[i] = (int)(8.0 * (double)rand() / (double)RAND_MAX);
// Create some random coordinates (double-to-float conversion
// is happening here. On the GPU this can have performance
// impact, so keep an eye on the data types at all times!
particleXcoord.HostData[i] = 20.0 * (double)rand() / (double)RAND_MAX;
particleYcoord.HostData[i] = 20.0 * (double)rand() / (double)RAND_MAX;
particleZcoord.HostData[i] = 20.0 * (double)rand() / (double)RAND_MAX;
particleCharge.HostData[i] = 0.5 - rand() / (double)RAND_MAX;
}
// Stage critical constants--see cribSheet struct instance cSh above.
cribSheet cnstage;
cnstage.nparticle = np;
cnstage.partX = particleXcoord.DevcData;
cnstage.partY = particleYcoord.DevcData;
cnstage.partZ = particleZcoord.DevcData;
cnstage.partQ = particleCharge.DevcData;
cnstage.Etot = etot.DevcData;
// Upload all data to the device
UploadGpuInt(&particleTypes);
UploadGpuFloat(&particleXcoord);
UploadGpuFloat(&particleYcoord);
UploadGpuFloat(&particleZcoord);
UploadGpuFloat(&particleCharge);
// Upload the constants to the constants cache
hipMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));
// Launch the kernel
hipLaunchKernelGGL(( ParticleSimulator), dim3(1), dim3(64), 0, 0, );
// Download the total energy
DownloadGpuFloat(&etot);
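  // Etot[0] was accumulated with atomicAdd and gives the reliable total;
  // Etot[1] used unguarded additions and illustrates the race condition
  // described in the kernel.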
printf("Total energy (atomic adds) = %10.4f\n", etot.HostData[0]);
printf("Total energy (straight adds) = %10.4f\n", etot.HostData[1]);
// Device synchronization
hipDeviceSynchronize();
return 0;
}
| ac0e8376dbfc124b1650177f920ec90a51bc970d.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nparticle;
int* partType;
float* partX;
float* partY;
float* partZ;
float* partQ;
float* Etot;
};
typedef struct GpuConstantsPackage cribSheet;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ cribSheet cSh;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
  int IsPinned;    // "Pinned" memory is best for Host <=> GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredFloat: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
  int IsPinned;    // "Pinned" memory is best for Host <=> GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// ParticleSimulator: run a rudimentary simulation of particles
//-----------------------------------------------------------------------------
__global__ void ParticleSimulator()
{
// Show that the device understands the number of particles
if (threadIdx.x == 0) {
printf("There are %d particles active.\n", cSh.nparticle);
}
// Loop over all atoms to report positions. This will NOT necessarily
// report all particles in orer 0, 1, 2, ..., N. The second warp
// (threadIdx.x >= 32 and threadIdx.x < 64) may fire off before the first,
// printing particles 32, 33, ... before 0, 1, 2, and so forth. Race
// conditions are mitigable with good programming practices to insulate
// code against thread access before results are ready.
int tidx = threadIdx.x;
while (tidx < cSh.nparticle) {
printf("Particle %3d: %9.4f %9.4f %9.4f with %9.4f charge\n", tidx,
cSh.partX[tidx], cSh.partY[tidx], cSh.partZ[tidx], cSh.partQ[tidx]);
tidx += blockDim.x;
}
// Loop over all particles and compute the electrostatic potential.
// Each thread will accumulate its own portion of the potential,
// then pool the results at the end.
tidx = threadIdx.x;
float qq = 0.0;
while (tidx < cSh.nparticle) {
// Naive way: each thread takes a particle and then loops over all
// other particles, up to but not including itself. Low-numbered
// threads will have little work to do while high-numbered threads
// will cause the rest of the thread block to idle.
int i;
for (i = 0; i < tidx; i++) {
float dx = cSh.partX[tidx] - cSh.partX[i];
float dy = cSh.partY[tidx] - cSh.partY[i];
float dz = cSh.partZ[tidx] - cSh.partZ[i];
float r = sqrt(dx*dx + dy*dy + dz*dz);
qq += cSh.partQ[tidx] * cSh.partQ[i] / r;
}
// Advance the counter by the number of threads in the block.
// The kernel of 64 threads processes particles 0-63, 64-127,
// 128-191, etc. until reaching nparticle.
tidx += blockDim.x;
}
// Each thread's qq could contain information pertaining to multiple atoms
// each interacting with their respective pairs. That's fine--the goal is
// to compute a total potential energy, not anything for one atom in
// particular.
atomicAdd(&cSh.Etot[0], qq);
// For edification, let's try that without atomic instructions: every
// thread simply accumulates to a number without regard to whether
// other threads are trying to edit the same number.
cSh.Etot[1] += qq;
}
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(int),
cudaHostAllocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
cudaMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//          for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(float),
cudaHostAllocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
cudaMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, np;
gpuInt particleTypes;
gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge;
gpuFloat etot;
// Create a small array of particles and populate it
particleTypes = CreateGpuInt(1000, 1);
particleXcoord = CreateGpuFloat(1000, 1);
particleYcoord = CreateGpuFloat(1000, 1);
particleZcoord = CreateGpuFloat(1000, 1);
particleCharge = CreateGpuFloat(1000, 1);
// Allocate and initialize the total energy
// accumulator on the host and on the device.
etot = CreateGpuFloat(2, 1);
etot.HostData[0] = 0.0;
etot.HostData[1] = 0.0;
UploadGpuFloat(&etot);
// Initialize random number generator. srand() SEEDS the generator,
// thereafter each call to rand() will return a different number.
  // This is a really bad generator (much better methods with longer
// periods before they start looping back over the same sequence are
// available).
srand(62052);
  // We have allocated for a maximum of 1000 particles,
// but let's say there are only 47 in this case.
np = 47;
for (i = 0; i < np; i++) {
// Integer truncation would happen anyway, I'm just making it explicit
    particleTypes.HostData[i] = (int)(8.0 * rand() / (double)RAND_MAX);
    // Create some random coordinates (double-to-float conversion is
    // happening here).  On the GPU this can have a performance impact,
    // so keep an eye on the data types at all times!
particleXcoord.HostData[i] = 20.0 * (double)rand() / (double)RAND_MAX;
particleYcoord.HostData[i] = 20.0 * (double)rand() / (double)RAND_MAX;
particleZcoord.HostData[i] = 20.0 * (double)rand() / (double)RAND_MAX;
particleCharge.HostData[i] = 0.5 - rand() / (double)RAND_MAX;
}
// Stage critical constants--see cribSheet struct instance cSh above.
cribSheet cnstage;
cnstage.nparticle = np;
cnstage.partX = particleXcoord.DevcData;
cnstage.partY = particleYcoord.DevcData;
cnstage.partZ = particleZcoord.DevcData;
cnstage.partQ = particleCharge.DevcData;
cnstage.Etot = etot.DevcData;
// Upload all data to the device
UploadGpuInt(&particleTypes);
UploadGpuFloat(&particleXcoord);
UploadGpuFloat(&particleYcoord);
UploadGpuFloat(&particleZcoord);
UploadGpuFloat(&particleCharge);
// Upload the constants to the constants cache
cudaMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));
// Launch the kernel
ParticleSimulator<<<1, 64>>>();
// Download the total energy
DownloadGpuFloat(&etot);
printf("Total energy (atomic adds) = %10.4f\n", etot.HostData[0]);
printf("Total energy (straight adds) = %10.4f\n", etot.HostData[1]);
// Device synchronization
cudaDeviceSynchronize();
return 0;
}
|
262b7d37e827e2d6580e398b15f1630a54a70650.hip | // !!! This is a file automatically generated by hipify!!!
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2014, September 8 - October 10
// ###
// ###
// ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <cstdlib>
#include <iostream>
using std::stringstream;
using std::cerr;
using std::cout;
using std::endl;
using std::string;
// parameter processing: template specialization for T=bool
template<>
bool getParam<bool>(std::string param, bool &var, int argc, char **argv)
{
const char *c_param = param.c_str();
for(int i=argc-1; i>=1; i--)
{
if (argv[i][0]!='-') continue;
if (strcmp(argv[i]+1, c_param)==0)
{
if (!(i+1<argc) || argv[i+1][0]=='-') { var = true; return true; }
std::stringstream ss;
ss << argv[i+1];
ss >> var;
return (bool)ss;
}
}
return false;
}
// opencv helpers
void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[(nc-1-c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c];
}
}
}
}
void convert_layered_to_mat(cv::Mat &mOut, const float *aIn)
{
convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels());
}
void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc-1-c) + nc*(x + (size_t)w*y)];
}
}
}
}
void convert_mat_to_layered(float *aOut, const cv::Mat &mIn)
{
convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels());
}
void showImage(string title, const cv::Mat &mat, int x, int y)
{
const char *wTitle = title.c_str();
cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE);
cvMoveWindow(wTitle, x, y);
cv::imshow(wTitle, mat);
}
// adding Gaussian noise
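// noise() draws one zero-mean Gaussian sample of standard deviation sigma
// via the Box-Muller transform; x1 is kept away from zero so log() stays finite.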
float noise(float sigma)
{
float x1 = (float)rand()/RAND_MAX;
float x2 = (float)rand()/RAND_MAX;
return sigma * sqrtf(-2*log(::max(x1,0.000001f)))*cosf(2*M_PI*x2);
}
void addNoise(cv::Mat &m, float sigma)
{
float *data = (float*)m.data;
int w = m.cols;
int h = m.rows;
int nc = m.channels();
size_t n = (size_t)w*h*nc;
for(size_t i=0; i<n; i++)
{
data[i] += noise(sigma);
}
}
// cuda error checking
string prev_file = "";
int prev_line = 0;
void cuda_check(string file, int line)
{
hipError_t e = hipGetLastError();
if (e != hipSuccess)
{
cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl;
if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl;
exit(1);
}
prev_file = file;
prev_line = line;
}
// Color transformation
// source : http://linuxtv.org/downloads/v4l-dvb-apis/colorspaces.html
float clamp (double x)
{
  float r = x; /* convert to float and clamp to [0,1] */
if (r < 0.0) return 0.0f;
else if (r > 1.0) return 1.0f;
else return r;
}
void forward_color_transf( float *imgRGB, float *imgChrom, int w, int h, int nc )
{
//int ER, EG, EB; /* gamma corrected RGB input [0;255] */
//int Y1, Cb, Cr; /* output [0;255] */
double r, g, b; /* temporaries */
double y1, pb, pr;
if (nc != 1)
{
for( int i = 0; i < w; i++ ){
for( int j = 0; j < h; j++ ){
//for( int channel; channel < nc; channel++ ){
r = imgRGB[ i + j*w + w*h*0 ]; // r = ER / 255.0;
g = imgRGB[ i + j*w + w*h*1 ]; // g = EG / 255.0;
b = imgRGB[ i + j*w + w*h*2 ]; // b = EB / 255.0;
y1 = 0.299 * r + 0.587 * g + 0.114 * b;
pb = -0.169 * r - 0.331 * g + 0.5 * b;
pr = 0.5 * r - 0.419 * g - 0.081 * b;
imgChrom[i + j*w + w*h*0] = clamp ( ( 219 / 255.0 ) * y1 + ( 16 / 255.0 )); // Y1 = clamp (219 * y1 + 16);
imgChrom[i + j*w + w*h*1] = clamp ( ( 224 / 255.0 ) * pb + ( 128 / 255.0 )); // Cb = clamp (224 * pb + 128);
imgChrom[i + j*w + w*h*2] = clamp ( ( 224 / 255.0 ) * pr + ( 128 / 255.0 )); // Cr = clamp (224 * pr + 128);
}
}
}
else
{
return;
}
}
//Inverse Transformation
void inverse_color_transf( float *imgChrom, float *imgRGB, int w, int h, int nc )
{
//int Y1, Cb, Cr; /* gamma pre-corrected input [0;255] */
//int ER, EG, EB; /* output [0;255] */
double r, g, b; /* temporaries */
double y1, pb, pr;
if (nc != 1)
{
for( int i = 0; i < w; i++ ){
for( int j = 0; j < h; j++ ){
y1 = ( imgChrom[i + j*w + w*h*0] - ( 16 / 255.0 )) / ( 219 / 255.0 ); // y1 = (Y1 - 16) / 219.0;
pb = ( imgChrom[i + j*w + w*h*1] - ( 128 / 255.0 )) / ( 224 / 255.0 ); // pb = (Cb - 128) / 224.0;
pr = ( imgChrom[i + j*w + w*h*2] - ( 128 / 255.0 )) / ( 224 / 255.0 ); // pr = (Cr - 128) / 224.0;
r = 1.0 * y1 + 0 * pb + 1.402 * pr;
g = 1.0 * y1 - 0.344 * pb - 0.714 * pr;
b = 1.0 * y1 + 1.772 * pb + 0 * pr;
imgRGB[ i + j*w + w*h*0 ] = clamp (r); // ER = clamp (r * 255); /* [ok? one should prob. limit y1,pb,pr] */
imgRGB[ i + j*w + w*h*1 ] = clamp (g); // EG = clamp (g * 255);
imgRGB[ i + j*w + w*h*2 ] = clamp (b); // EB = clamp (b * 255);
}
}
}
else
{
return;
}
}
| 262b7d37e827e2d6580e398b15f1630a54a70650.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2014, September 8 - October 10
// ###
// ###
// ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <cstdlib>
#include <iostream>
using std::stringstream;
using std::cerr;
using std::cout;
using std::endl;
using std::string;
// parameter processing: template specialization for T=bool
template<>
bool getParam<bool>(std::string param, bool &var, int argc, char **argv)
{
const char *c_param = param.c_str();
for(int i=argc-1; i>=1; i--)
{
if (argv[i][0]!='-') continue;
if (strcmp(argv[i]+1, c_param)==0)
{
if (!(i+1<argc) || argv[i+1][0]=='-') { var = true; return true; }
std::stringstream ss;
ss << argv[i+1];
ss >> var;
return (bool)ss;
}
}
return false;
}
// opencv helpers
void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[(nc-1-c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c];
}
}
}
}
void convert_layered_to_mat(cv::Mat &mOut, const float *aIn)
{
convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels());
}
void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc-1-c) + nc*(x + (size_t)w*y)];
}
}
}
}
void convert_mat_to_layered(float *aOut, const cv::Mat &mIn)
{
convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels());
}
void showImage(string title, const cv::Mat &mat, int x, int y)
{
const char *wTitle = title.c_str();
cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE);
cvMoveWindow(wTitle, x, y);
cv::imshow(wTitle, mat);
}
// adding Gaussian noise
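// noise() draws one zero-mean Gaussian sample of standard deviation sigma
// via the Box-Muller transform; x1 is kept away from zero so log() stays finite.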
float noise(float sigma)
{
float x1 = (float)rand()/RAND_MAX;
float x2 = (float)rand()/RAND_MAX;
return sigma * sqrtf(-2*log(std::max(x1,0.000001f)))*cosf(2*M_PI*x2);
}
void addNoise(cv::Mat &m, float sigma)
{
float *data = (float*)m.data;
int w = m.cols;
int h = m.rows;
int nc = m.channels();
size_t n = (size_t)w*h*nc;
for(size_t i=0; i<n; i++)
{
data[i] += noise(sigma);
}
}
// cuda error checking
string prev_file = "";
int prev_line = 0;
void cuda_check(string file, int line)
{
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess)
{
cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl;
exit(1);
}
prev_file = file;
prev_line = line;
}
// Color transformation
// source : http://linuxtv.org/downloads/v4l-dvb-apis/colorspaces.html
float clamp (double x)
{
  float r = x; /* convert to float and clamp to [0,1] */
if (r < 0.0) return 0.0f;
else if (r > 1.0) return 1.0f;
else return r;
}
void forward_color_transf( float *imgRGB, float *imgChrom, int w, int h, int nc )
{
//int ER, EG, EB; /* gamma corrected RGB input [0;255] */
//int Y1, Cb, Cr; /* output [0;255] */
double r, g, b; /* temporaries */
double y1, pb, pr;
if (nc != 1)
{
for( int i = 0; i < w; i++ ){
for( int j = 0; j < h; j++ ){
//for( int channel; channel < nc; channel++ ){
r = imgRGB[ i + j*w + w*h*0 ]; // r = ER / 255.0;
g = imgRGB[ i + j*w + w*h*1 ]; // g = EG / 255.0;
b = imgRGB[ i + j*w + w*h*2 ]; // b = EB / 255.0;
y1 = 0.299 * r + 0.587 * g + 0.114 * b;
pb = -0.169 * r - 0.331 * g + 0.5 * b;
pr = 0.5 * r - 0.419 * g - 0.081 * b;
imgChrom[i + j*w + w*h*0] = clamp ( ( 219 / 255.0 ) * y1 + ( 16 / 255.0 )); // Y1 = clamp (219 * y1 + 16);
imgChrom[i + j*w + w*h*1] = clamp ( ( 224 / 255.0 ) * pb + ( 128 / 255.0 )); // Cb = clamp (224 * pb + 128);
imgChrom[i + j*w + w*h*2] = clamp ( ( 224 / 255.0 ) * pr + ( 128 / 255.0 )); // Cr = clamp (224 * pr + 128);
}
}
}
else
{
return;
}
}
//Inverse Transformation
void inverse_color_transf( float *imgChrom, float *imgRGB, int w, int h, int nc )
{
//int Y1, Cb, Cr; /* gamma pre-corrected input [0;255] */
//int ER, EG, EB; /* output [0;255] */
double r, g, b; /* temporaries */
double y1, pb, pr;
if (nc != 1)
{
for( int i = 0; i < w; i++ ){
for( int j = 0; j < h; j++ ){
y1 = ( imgChrom[i + j*w + w*h*0] - ( 16 / 255.0 )) / ( 219 / 255.0 ); // y1 = (Y1 - 16) / 219.0;
pb = ( imgChrom[i + j*w + w*h*1] - ( 128 / 255.0 )) / ( 224 / 255.0 ); // pb = (Cb - 128) / 224.0;
pr = ( imgChrom[i + j*w + w*h*2] - ( 128 / 255.0 )) / ( 224 / 255.0 ); // pr = (Cr - 128) / 224.0;
r = 1.0 * y1 + 0 * pb + 1.402 * pr;
g = 1.0 * y1 - 0.344 * pb - 0.714 * pr;
b = 1.0 * y1 + 1.772 * pb + 0 * pr;
imgRGB[ i + j*w + w*h*0 ] = clamp (r); // ER = clamp (r * 255); /* [ok? one should prob. limit y1,pb,pr] */
imgRGB[ i + j*w + w*h*1 ] = clamp (g); // EG = clamp (g * 255);
imgRGB[ i + j*w + w*h*2 ] = clamp (b); // EB = clamp (b * 255);
}
}
}
else
{
return;
}
}
|
34e0acb3ea90b9d6b021236a2822653b8b1b803a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <vector>
using std::vector;
// parameter describing the size of matrix A
const int rows = 4096;
const int cols = 4096;
const int BLOCK_SIZE = 32;
// naive transpose kernel
__global__ void matrixTransposeNaive(float *_a, // pointer to matrix A on the device
float *_b) // pointer to matrix B on the device
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // row
int j = blockIdx.y * blockDim.y + threadIdx.y; // col
int index_in = i*cols+j; // (i,j) from matrix A
int index_out = j*rows+i; // becomes (j,i) in matrix B = transpose(A)
_b[index_out] = _a[index_in];
}
// coalesced memory transpose kernel
__global__ void matrixTransposeShared(float *_a, // pointer to matrix A on the device
float *_b) // pointer to matrix B on the device
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index_in = j*cols+i; // (i,j) from matrix A
// this thread fills in the appropriate box inside the shared memory in this block
__shared__ float tile[BLOCK_SIZE][BLOCK_SIZE];
tile [ threadIdx.x ] [ threadIdx.y ] = _a [index_in];
// wait until all threads in this block are done writing to shared memory in parallel
__syncthreads();
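  // Swap the block indices so this block writes the transposed tile of the
  // output; threadIdx.x still walks consecutive output elements, so the
  // global store below stays coalesced.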
i = blockIdx.y * blockDim.x + threadIdx.x;
j = blockIdx.x * blockDim.y + threadIdx.y;
int index_out = j*rows+i; // (i,j) from matrix A becomes (j,i) in matrix B = transpose(A)
_b[index_out] = tile[ threadIdx.y ] [ threadIdx.x ];
}
// coalesced memory transpose kernel without banking conflicts
__global__ void matrixTransposeNoBankConflicts(float *_a, // pointer to matrix A on the device
float *_b) // pointer to matrix B on the device
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index_in = j*cols+i; // (i,j) from matrix A
// this thread fills in the appropriate box inside the shared memory in this block
__shared__ float tile[BLOCK_SIZE][BLOCK_SIZE+1];
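  // Padding each row to 33 floats spreads the strided store on the next line
  // (tile[threadIdx.x][threadIdx.y]) across all 32 banks instead of one,
  // which removes the shared-memory bank conflicts of the kernel above.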
tile [ threadIdx.x ] [ threadIdx.y ] = _a [index_in];
i = blockIdx.y * blockDim.x + threadIdx.x;
j = blockIdx.x * blockDim.y + threadIdx.y;
int index_out = j*rows+i; // (i,j) from matrix A becomes (j,i) in matrix B = transpose(A)
// wait until all threads in this block are done writing to shared memory in parallel
__syncthreads();
_b[index_out] = tile[ threadIdx.y ] [ threadIdx.x ];
}
// the main program starts life on the CPU and calls device kernels as required
int main(int argc, char *argv[])
{
  // allocate space on the host for the input matrix (a) and the output matrix (b)
vector<float> a(rows*cols);
vector<float> b(rows*cols);
// define device pointers for the same arrays when they'll be copied to the device
float *_a, *_b;
// allocate memory on the device (GPU) and check for errors (if any) during this call
hipError_t err;
// allocate space for matrix A
if (err = hipMalloc((void **) &_a, rows*cols*sizeof(float))) {
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// allocate space for matrix B
if (err = hipMalloc((void **) &_b, rows*cols*sizeof(float))) {
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Fill matrix A
for (int row = 0; row < rows; row++) {
for (int col = 0; col < cols; col++) {
a[row + col*rows] = row + col*rows;
}
}
// Copy array contents of A from the host (CPU) to the device (GPU)
// Note that this is copied to the "global" memory on the device and is accessible to all threads in all blocks
hipMemcpy(_a, a.data(), rows*cols*sizeof(float), hipMemcpyHostToDevice);
  // assign a 2D distribution of 32 x 32 x 1 CUDA "threads" within each CUDA "block"
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
// calculate number of blocks along X and Y in a 2D CUDA "grid"
dim3 dimGrid( ceil(float(rows)/float(dimBlock.x)), ceil(float(cols)/float(dimBlock.y)), 1 );
float time;
// create CUDA events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// start the timer
hipEventRecord( start, 0);
// launch the GPU kernel
// hipMemcpy(_b, _a, cols*rows*sizeof(float), hipMemcpyDeviceToDevice);
// matrixTransposeNaive<<<dimGrid,dimBlock>>>(_a, _b);
// matrixTransposeShared<<<dimGrid,dimBlock>>>(_a, _b);
hipLaunchKernelGGL(( matrixTransposeNoBankConflicts), dim3(dimGrid),dim3(dimBlock), 0, 0, _a, _b);
// stop the timer
hipEventRecord( stop, 0);
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop);
// print out the time required for the kernel to finish the transpose operation
double data = 2.0 * (rows * cols * sizeof(float)) / (1024 * 1024 * 1024);
printf("data %f \n", data);
printf("time %f \n", time/1000);
double Bandwidth = data/(time/1000);
printf("Elapsed Time = %f ms Bandwidth achieved (GB/s) = %f\n", time, Bandwidth);
// copy the answer back to the host (CPU) from the device (GPU)
hipMemcpy(b.data(), _b, cols*rows*sizeof(float), hipMemcpyDeviceToHost);
// for(int i = 0; i < 64; i++) {
// for(int j = 0; j < 64; j++) {
// printf("%f ", b[i * rows + j]);
// }
// printf("\n");
// }
// free device memory
hipFree(_a);
hipFree(_b);
// successful program termination
return 0;
}
| 34e0acb3ea90b9d6b021236a2822653b8b1b803a.cu | #include <cstdio>
#include <vector>
using std::vector;
// parameter describing the size of matrix A
const int rows = 4096;
const int cols = 4096;
const int BLOCK_SIZE = 32;
// naive transpose kernel
__global__ void matrixTransposeNaive(float *_a, // pointer to matrix A on the device
float *_b) // pointer to matrix B on the device
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // row
int j = blockIdx.y * blockDim.y + threadIdx.y; // col
int index_in = i*cols+j; // (i,j) from matrix A
int index_out = j*rows+i; // becomes (j,i) in matrix B = transpose(A)
_b[index_out] = _a[index_in];
}
// coalesced memory transpose kernel
__global__ void matrixTransposeShared(float *_a, // pointer to matrix A on the device
float *_b) // pointer to matrix B on the device
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index_in = j*cols+i; // (i,j) from matrix A
// this thread fills in the appropriate box inside the shared memory in this block
__shared__ float tile[BLOCK_SIZE][BLOCK_SIZE];
tile [ threadIdx.x ] [ threadIdx.y ] = _a [index_in];
// wait until all threads in this block are done writing to shared memory in parallel
__syncthreads();
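  // Swap the block indices so this block writes the transposed tile of the
  // output; threadIdx.x still walks consecutive output elements, so the
  // global store below stays coalesced.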
i = blockIdx.y * blockDim.x + threadIdx.x;
j = blockIdx.x * blockDim.y + threadIdx.y;
int index_out = j*rows+i; // (i,j) from matrix A becomes (j,i) in matrix B = transpose(A)
_b[index_out] = tile[ threadIdx.y ] [ threadIdx.x ];
}
// coalesced memory transpose kernel without banking conflicts
__global__ void matrixTransposeNoBankConflicts(float *_a, // pointer to matrix A on the device
float *_b) // pointer to matrix B on the device
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index_in = j*cols+i; // (i,j) from matrix A
// this thread fills in the appropriate box inside the shared memory in this block
__shared__ float tile[BLOCK_SIZE][BLOCK_SIZE+1];
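  // Padding each row to 33 floats spreads the strided store on the next line
  // (tile[threadIdx.x][threadIdx.y]) across all 32 banks instead of one,
  // which removes the shared-memory bank conflicts of the kernel above.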
tile [ threadIdx.x ] [ threadIdx.y ] = _a [index_in];
i = blockIdx.y * blockDim.x + threadIdx.x;
j = blockIdx.x * blockDim.y + threadIdx.y;
int index_out = j*rows+i; // (i,j) from matrix A becomes (j,i) in matrix B = transpose(A)
// wait until all threads in this block are done writing to shared memory in parallel
__syncthreads();
_b[index_out] = tile[ threadIdx.y ] [ threadIdx.x ];
}
// the main program starts life on the CPU and calls device kernels as required
int main(int argc, char *argv[])
{
  // allocate space on the host for the input matrix (a) and the output matrix (b)
vector<float> a(rows*cols);
vector<float> b(rows*cols);
// define device pointers for the same arrays when they'll be copied to the device
float *_a, *_b;
// allocate memory on the device (GPU) and check for errors (if any) during this call
cudaError_t err;
// allocate space for matrix A
if (err = cudaMalloc((void **) &_a, rows*cols*sizeof(float))) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// allocate space for matrix B
if (err = cudaMalloc((void **) &_b, rows*cols*sizeof(float))) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Fill matrix A
for (int row = 0; row < rows; row++) {
for (int col = 0; col < cols; col++) {
a[row + col*rows] = row + col*rows;
}
}
// Copy array contents of A from the host (CPU) to the device (GPU)
// Note that this is copied to the "global" memory on the device and is accessible to all threads in all blocks
cudaMemcpy(_a, a.data(), rows*cols*sizeof(float), cudaMemcpyHostToDevice);
  // assign a 2D distribution of 32 x 32 x 1 CUDA "threads" within each CUDA "block"
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
// calculate number of blocks along X and Y in a 2D CUDA "grid"
dim3 dimGrid( ceil(float(rows)/float(dimBlock.x)), ceil(float(cols)/float(dimBlock.y)), 1 );
float time;
// create CUDA events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start the timer
cudaEventRecord( start, 0);
// launch the GPU kernel
// cudaMemcpy(_b, _a, cols*rows*sizeof(float), cudaMemcpyDeviceToDevice);
// matrixTransposeNaive<<<dimGrid,dimBlock>>>(_a, _b);
// matrixTransposeShared<<<dimGrid,dimBlock>>>(_a, _b);
matrixTransposeNoBankConflicts<<<dimGrid,dimBlock>>>(_a, _b);
// stop the timer
cudaEventRecord( stop, 0);
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop);
// print out the time required for the kernel to finish the transpose operation
double data = 2.0 * (rows * cols * sizeof(float)) / (1024 * 1024 * 1024);
printf("data %f \n", data);
printf("time %f \n", time/1000);
double Bandwidth = data/(time/1000);
printf("Elapsed Time = %f ms Bandwidth achieved (GB/s) = %f\n", time, Bandwidth);
// copy the answer back to the host (CPU) from the device (GPU)
cudaMemcpy(b.data(), _b, cols*rows*sizeof(float), cudaMemcpyDeviceToHost);
// for(int i = 0; i < 64; i++) {
// for(int j = 0; j < 64; j++) {
// printf("%f ", b[i * rows + j]);
// }
// printf("\n");
// }
// free device memory
cudaFree(_a);
cudaFree(_b);
// successful program termination
return 0;
}
|
be7519778e8d065c85f1e1499f22bfb47499addc.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include"io.h"
#include"cuda_mpi_routines.h"
/*! \fn int initialize_cuda_mpi(int myid, int nprocs);
* \brief CUDA initialization within MPI. */
int initialize_cuda_mpi(int myid, int nprocs)
{
int i_device = 0; //GPU device for this process
int n_device; //number of GPU devices available
hipError_t flag_error;
//get the number of cuda devices
flag_error = hipGetDeviceCount(&n_device);
//check for errors
if(flag_error!=hipSuccess)
{
if(flag_error==hipErrorNoDevice)
fprintf(stderr,"hipGetDeviceCount: Error! for myid = %d and n_device = %d; hipErrorNoDevice\n",myid,n_device);
if(flag_error==hipErrorInsufficientDriver)
fprintf(stderr,"hipGetDeviceCount: Error! for myid = %d and n_device = %d; hipErrorInsufficientDriver\n",myid,n_device);
fflush(stderr);
return 1;
}
//set a cuda device for each process
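  // (round-robin assignment: when there are more MPI ranks than GPUs on the
  //  node, several ranks end up sharing a device)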
hipSetDevice(myid%n_device);
//double check
hipGetDevice(&i_device);
// printf("In initialize_cuda_mpi: myid = %d, i_device = %d, n_device = %d\n",myid,i_device,n_device);
// fflush(stdout);
return 0;
}
| be7519778e8d065c85f1e1499f22bfb47499addc.cu | #include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include"io.h"
#include"cuda_mpi_routines.h"
/*! \fn int initialize_cuda_mpi(int myid, int nprocs);
* \brief CUDA initialization within MPI. */
int initialize_cuda_mpi(int myid, int nprocs)
{
int i_device = 0; //GPU device for this process
int n_device; //number of GPU devices available
cudaError_t flag_error;
//get the number of cuda devices
flag_error = cudaGetDeviceCount(&n_device);
//check for errors
if(flag_error!=cudaSuccess)
{
if(flag_error==cudaErrorNoDevice)
fprintf(stderr,"cudaGetDeviceCount: Error! for myid = %d and n_device = %d; cudaErrorNoDevice\n",myid,n_device);
if(flag_error==cudaErrorInsufficientDriver)
fprintf(stderr,"cudaGetDeviceCount: Error! for myid = %d and n_device = %d; cudaErrorInsufficientDriver\n",myid,n_device);
fflush(stderr);
return 1;
}
//set a cuda device for each process
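  // (round-robin assignment: when there are more MPI ranks than GPUs on the
  //  node, several ranks end up sharing a device)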
cudaSetDevice(myid%n_device);
//double check
cudaGetDevice(&i_device);
// printf("In initialize_cuda_mpi: myid = %d, i_device = %d, n_device = %d\n",myid,i_device,n_device);
// fflush(stdout);
return 0;
}
|
3c8a9a074837cd94a14665d3b15b08f377d89039.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<bits/stdc++.h>
#include<cuda.h>
using namespace std;
struct edge{
int u, v, c, f;
};
// pushes flow along the edges adjacent to a vertex, concurrently for all vertices
__global__ void push(int n, int* excess, int* excess_inc, int* prefix_deg, int* adj, int* height, int* new_height,
edge* d_edges){
int id = blockIdx.x * blockDim.x + threadIdx.x;
// push flow only for intermediate vertices (non-terminals)
if(id > 0 && id < n - 1){
new_height[id] = 3 * n;
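        // 3*n acts as an 'infinity' sentinel: if no admissible neighbour is
        // found, relabel() sees the value unchanged and keeps the old label.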
for(int i = prefix_deg[id]; i < prefix_deg[id + 1] && excess[id]; i++){
int idx = adj[i];
int u = d_edges[idx].u, v = d_edges[idx].v, c = d_edges[idx].c, f = d_edges[idx].f;
// pushes flow along forward edge
if(u == id && f < c && height[u] > height[v]){
int push_flow = min(c - f, excess[u]);
atomicAdd(excess_inc + v, push_flow);
atomicAdd(&(d_edges[idx].f), push_flow);
excess[u] -= push_flow;
}
// pushes flow along reverse edge
if(v == id && f && height[v] > height[u]){
int push_flow = min(f, excess[v]);
atomicAdd(excess_inc + u, push_flow);
atomicAdd(&(d_edges[idx].f), -push_flow);
excess[v] -= push_flow;
}
}
}
}
// computes labels (out of place)
__global__ void compute_label(int n, int m, int* excess, int* excess_inc, int* height, int* new_height, edge* d_edges){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < m){
int u = d_edges[id].u, v = d_edges[id].v, c = d_edges[id].c, f = d_edges[id].f;
if(u > 0 && u < n - 1 && (excess[u] || excess_inc[u]) && f < c)
atomicMin(new_height + u, height[v] + 1);
if(v > 0 && v < n - 1 && (excess[v] || excess_inc[v]) && f)
atomicMin(new_height + v, height[u] + 1);
}
}
// applies the labels found in computer_label and updates excess of each vertex
__global__ void relabel(int n, int* excess, int* excess_inc, int* height, int* new_height, int* is_excess){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id > 0 && id < n - 1){
if(new_height[id] != 3 * n)
height[id] = new_height[id];
excess[id] += excess_inc[id];
excess_inc[id] = 0;
if(excess[id])
atomicAdd(is_excess, 1);
}
}
// computes the flow out of source
__global__ void compute_maxflow(int m, int* total_flow, edge* d_edges)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < m){
int u = d_edges[id].u, v = d_edges[id].v, f = d_edges[id].f;
if(!u) atomicAdd(total_flow, f);
if(!v) atomicAdd(total_flow, -f);
}
}
// global relabeling heuristic - performs BFS from sink to find lower bound on labels
__global__ void global_label(int m, int cur_wave, int* height, int* wave, int* progress, edge* d_edges)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < m){
int u = d_edges[id].u, v = d_edges[id].v, c = d_edges[id].c, f = d_edges[id].f;
if(wave[v] == cur_wave && f < c && atomicCAS(wave + u, -1, cur_wave + 1) == -1){
height[u] = height[v] + 1;
atomicAdd(progress, 1);
}
if(wave[u] == cur_wave && f && atomicCAS(wave + v, -1, cur_wave + 1) == -1){
height[v]=height[u] + 1;
atomicAdd(progress, 1);
}
}
}
int main(int argc, char* argv[]){
auto clk=clock();
if(argc < 3){
cout<<"Enter file name (graph) and global relabel heuristic flag (0 or 1)"<<endl;
return 0;
}
int n, m;
edge *edges, *d_edges;
int *excess, *d_excess, *prefix_deg, *d_prefix_deg, *adj, *d_adj, *d_excess_inc, *d_is_excess, *height, *new_height,
*d_total_flow, *progress, *wave;
ifstream fin(argv[1]);
int global_relabel_flag = atoi(argv[2]);
fin >> n >> m;
vector<int> deg(n);
vector<vector<int>> edge_idx(n);
edges = new edge[m];
for(int i = 0; i < m; i++){
fin >> edges[i].u >> edges[i].v >> edges[i].c;
edges[i].u--;
edges[i].v--;
deg[edges[i].u]++;
deg[edges[i].v]++;
edge_idx[edges[i].u].push_back(i);
edge_idx[edges[i].v].push_back(i);
edges[i].f = 0;
}
int* reset_height = new int[n];
for(int i = 0; i < n;i++)
reset_height[i] = n;
excess = (int*) malloc(n * sizeof(int));
memset(excess, 0, n * sizeof(int));
for(int i: edge_idx[0]){
int u = edges[i].u, v = edges[i].v, c = edges[i].c;
if(!u){
edges[i].f = edges[i].c;
excess[v] += c;
}
}
prefix_deg = (int*) malloc((n + 1) * sizeof(int));
prefix_deg[0] = 0;
for(int i = 1; i <= n; i++){
prefix_deg[i] = prefix_deg[i-1] + deg[i-1];
}
adj = (int*) malloc(2 * m * sizeof(int));
for(int i = 0, c = 0; i < n ; i++){
for(int j = 0; j < edge_idx[i].size(); j++, c++){
adj[c] = edge_idx[i][j];
}
}
hipMalloc(&d_excess, n * sizeof(int));
hipMalloc(&d_excess_inc, n * sizeof(int));
hipMalloc(&progress, sizeof(int));
hipMalloc(&wave, n * sizeof(int));
hipMalloc(&d_prefix_deg, (n + 1) * sizeof(int));
hipMalloc(&d_adj, 2 * m * sizeof(int));
hipMalloc(&height, n * sizeof(int));
hipMalloc(&new_height, n * sizeof(int));
hipMalloc(&d_edges, m * sizeof(edge));
hipMalloc(&d_is_excess, sizeof(int));
hipMemset(height, 0, n * sizeof(int));
hipMemset(d_excess_inc, 0, n * sizeof(int));
hipMemcpy(d_excess, excess, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_prefix_deg, prefix_deg, (n + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_adj, adj, 2 * m * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(height, &n, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_edges, edges, m * sizeof(edge), hipMemcpyHostToDevice);
int threads = 1024;
int m_blocks = ceil((float)m/threads);
int n_blocks = ceil((float)n/threads);
int total_flow = 0;
int is_excess;
int iter = 0,prog;
// loop to push flow along edges
// if there is no excess vertex, the loop breaks
do{
iter++;
hipLaunchKernelGGL(( push), dim3(n_blocks), dim3(threads), 0, 0, n, d_excess, d_excess_inc, d_prefix_deg, d_adj, height, new_height, d_edges);
hipLaunchKernelGGL(( compute_label), dim3(m_blocks), dim3(threads), 0, 0, n, m, d_excess, d_excess_inc, height, new_height, d_edges);
hipMemset(d_is_excess, 0, sizeof(int));
hipLaunchKernelGGL(( relabel), dim3(n_blocks), dim3(threads), 0, 0, n, d_excess, d_excess_inc, height, new_height, d_is_excess);
hipMemcpy(&is_excess, d_is_excess, sizeof(int), hipMemcpyDeviceToHost);
// applies global relabeling every n iterations
if(global_relabel_flag && iter % n == 0){ // perform global relabeling heuristic
hipMemset(wave, -1, n * sizeof(int));
hipMemset(wave + (n - 1) , 0, sizeof(int));
int cur_wave = 0;
do{
hipMemset(progress, 0, sizeof(int));
hipLaunchKernelGGL(( global_label), dim3(m_blocks), dim3(threads), 0, 0, m, cur_wave++, height, wave, progress, d_edges);
hipMemcpy(&prog, progress, sizeof(int), hipMemcpyDeviceToHost);
}while(prog);
}
}while(is_excess);
hipMalloc(&d_total_flow, sizeof(int));
hipMemset(d_total_flow, 0, sizeof(int));
hipLaunchKernelGGL(( compute_maxflow), dim3(m_blocks), dim3(threads), 0, 0, m, d_total_flow, d_edges);
hipMemcpy(&total_flow, d_total_flow, sizeof(int), hipMemcpyDeviceToHost);
double t_elapsed = (double)(clock()-clk)/CLOCKS_PER_SEC;
printf("|V|:%d |E|:%d Flow:%d\nTime:%f\n", n, m, total_flow, t_elapsed);
} | 3c8a9a074837cd94a14665d3b15b08f377d89039.cu | #include<bits/stdc++.h>
#include<cuda.h>
using namespace std;
struct edge{
int u, v, c, f;
};
// pushes flow along the edges adjacent to a vertex, concurrently for all vertices
__global__ void push(int n, int* excess, int* excess_inc, int* prefix_deg, int* adj, int* height, int* new_height,
edge* d_edges){
int id = blockIdx.x * blockDim.x + threadIdx.x;
// push flow only for intermediate vertices (non-terminals)
if(id > 0 && id < n - 1){
new_height[id] = 3 * n;
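        // 3*n acts as an 'infinity' sentinel: if no admissible neighbour is
        // found, relabel() sees the value unchanged and keeps the old label.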
for(int i = prefix_deg[id]; i < prefix_deg[id + 1] && excess[id]; i++){
int idx = adj[i];
int u = d_edges[idx].u, v = d_edges[idx].v, c = d_edges[idx].c, f = d_edges[idx].f;
// pushes flow along forward edge
if(u == id && f < c && height[u] > height[v]){
int push_flow = min(c - f, excess[u]);
atomicAdd(excess_inc + v, push_flow);
atomicAdd(&(d_edges[idx].f), push_flow);
excess[u] -= push_flow;
}
// pushes flow along reverse edge
if(v == id && f && height[v] > height[u]){
int push_flow = min(f, excess[v]);
atomicAdd(excess_inc + u, push_flow);
atomicAdd(&(d_edges[idx].f), -push_flow);
excess[v] -= push_flow;
}
}
}
}
// computes labels (out of place)
__global__ void compute_label(int n, int m, int* excess, int* excess_inc, int* height, int* new_height, edge* d_edges){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < m){
int u = d_edges[id].u, v = d_edges[id].v, c = d_edges[id].c, f = d_edges[id].f;
if(u > 0 && u < n - 1 && (excess[u] || excess_inc[u]) && f < c)
atomicMin(new_height + u, height[v] + 1);
if(v > 0 && v < n - 1 && (excess[v] || excess_inc[v]) && f)
atomicMin(new_height + v, height[u] + 1);
}
}
// applies the labels found in computer_label and updates excess of each vertex
__global__ void relabel(int n, int* excess, int* excess_inc, int* height, int* new_height, int* is_excess){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id > 0 && id < n - 1){
if(new_height[id] != 3 * n)
height[id] = new_height[id];
excess[id] += excess_inc[id];
excess_inc[id] = 0;
if(excess[id])
atomicAdd(is_excess, 1);
}
}
// computes the flow out of source
__global__ void compute_maxflow(int m, int* total_flow, edge* d_edges)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < m){
int u = d_edges[id].u, v = d_edges[id].v, f = d_edges[id].f;
if(!u) atomicAdd(total_flow, f);
if(!v) atomicAdd(total_flow, -f);
}
}
// global relabeling heuristic - performs BFS from sink to find lower bound on labels
__global__ void global_label(int m, int cur_wave, int* height, int* wave, int* progress, edge* d_edges)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < m){
int u = d_edges[id].u, v = d_edges[id].v, c = d_edges[id].c, f = d_edges[id].f;
if(wave[v] == cur_wave && f < c && atomicCAS(wave + u, -1, cur_wave + 1) == -1){
height[u] = height[v] + 1;
atomicAdd(progress, 1);
}
if(wave[u] == cur_wave && f && atomicCAS(wave + v, -1, cur_wave + 1) == -1){
height[v]=height[u] + 1;
atomicAdd(progress, 1);
}
}
}
int main(int argc, char* argv[]){
auto clk=clock();
if(argc < 3){
cout<<"Enter file name (graph) and global relabel heuristic flag (0 or 1)"<<endl;
return 0;
}
int n, m;
edge *edges, *d_edges;
int *excess, *d_excess, *prefix_deg, *d_prefix_deg, *adj, *d_adj, *d_excess_inc, *d_is_excess, *height, *new_height,
*d_total_flow, *progress, *wave;
ifstream fin(argv[1]);
int global_relabel_flag = atoi(argv[2]);
fin >> n >> m;
vector<int> deg(n);
vector<vector<int>> edge_idx(n);
edges = new edge[m];
for(int i = 0; i < m; i++){
fin >> edges[i].u >> edges[i].v >> edges[i].c;
edges[i].u--;
edges[i].v--;
deg[edges[i].u]++;
deg[edges[i].v]++;
edge_idx[edges[i].u].push_back(i);
edge_idx[edges[i].v].push_back(i);
edges[i].f = 0;
}
int* reset_height = new int[n];
for(int i = 0; i < n;i++)
reset_height[i] = n;
excess = (int*) malloc(n * sizeof(int));
memset(excess, 0, n * sizeof(int));
for(int i: edge_idx[0]){
int u = edges[i].u, v = edges[i].v, c = edges[i].c;
if(!u){
edges[i].f = edges[i].c;
excess[v] += c;
}
}
prefix_deg = (int*) malloc((n + 1) * sizeof(int));
prefix_deg[0] = 0;
for(int i = 1; i <= n; i++){
prefix_deg[i] = prefix_deg[i-1] + deg[i-1];
}
adj = (int*) malloc(2 * m * sizeof(int));
for(int i = 0, c = 0; i < n ; i++){
for(int j = 0; j < edge_idx[i].size(); j++, c++){
adj[c] = edge_idx[i][j];
}
}
cudaMalloc(&d_excess, n * sizeof(int));
cudaMalloc(&d_excess_inc, n * sizeof(int));
cudaMalloc(&progress, sizeof(int));
cudaMalloc(&wave, n * sizeof(int));
cudaMalloc(&d_prefix_deg, (n + 1) * sizeof(int));
cudaMalloc(&d_adj, 2 * m * sizeof(int));
cudaMalloc(&height, n * sizeof(int));
cudaMalloc(&new_height, n * sizeof(int));
cudaMalloc(&d_edges, m * sizeof(edge));
cudaMalloc(&d_is_excess, sizeof(int));
cudaMemset(height, 0, n * sizeof(int));
cudaMemset(d_excess_inc, 0, n * sizeof(int));
cudaMemcpy(d_excess, excess, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_prefix_deg, prefix_deg, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_adj, adj, 2 * m * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(height, &n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_edges, edges, m * sizeof(edge), cudaMemcpyHostToDevice);
int threads = 1024;
int m_blocks = ceil((float)m/threads);
int n_blocks = ceil((float)n/threads);
int total_flow = 0;
int is_excess;
int iter = 0,prog;
// loop to push flow along edges
// if there is no excess vertex, the loop breaks
do{
iter++;
push<<<n_blocks, threads>>>(n, d_excess, d_excess_inc, d_prefix_deg, d_adj, height, new_height, d_edges);
compute_label<<<m_blocks, threads>>>(n, m, d_excess, d_excess_inc, height, new_height, d_edges);
cudaMemset(d_is_excess, 0, sizeof(int));
relabel<<<n_blocks, threads>>>(n, d_excess, d_excess_inc, height, new_height, d_is_excess);
cudaMemcpy(&is_excess, d_is_excess, sizeof(int), cudaMemcpyDeviceToHost);
// applies global relabeling every n iterations
if(global_relabel_flag && iter % n == 0){ // perform global relabeling heuristic
cudaMemset(wave, -1, n * sizeof(int));
cudaMemset(wave + (n - 1) , 0, sizeof(int));
int cur_wave = 0;
do{
cudaMemset(progress, 0, sizeof(int));
global_label<<<m_blocks, threads>>>(m, cur_wave++, height, wave, progress, d_edges);
cudaMemcpy(&prog, progress, sizeof(int), cudaMemcpyDeviceToHost);
}while(prog);
}
}while(is_excess);
cudaMalloc(&d_total_flow, sizeof(int));
cudaMemset(d_total_flow, 0, sizeof(int));
compute_maxflow<<<m_blocks, threads>>>(m, d_total_flow, d_edges);
cudaMemcpy(&total_flow, d_total_flow, sizeof(int), cudaMemcpyDeviceToHost);
double t_elapsed = (double)(clock()-clk)/CLOCKS_PER_SEC;
printf("|V|:%d |E|:%d Flow:%d\nTime:%f\n", n, m, total_flow, t_elapsed);
} |
c77b40b27c2fcc86306ce71e39acf14cedddf073.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
//#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <sstream>
#include <string.h>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//# <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 3
#define H 227
#define W 227
#define R 11
#define S 11
#define M 96
#define E 55
#define F 55
#define U 4
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt, int height, int width)
{
int row = threadIdx.y; int col = threadIdx.x;
for (int y=0;y<2;y++){
for (int x=0;x<2;x++){
float red_sum = 0;
for(int i=0; i<num_ch; i++)
{ //if((2*row+y<height)&&(2*col+x<width))
red_sum += d_o[i*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(row+y*blockDim.y)*width+(col+x*blockDim.x)] ;
}
if((row+y*blockDim.y<height)&&(col+x*blockDim.x<width))
d_r[blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(row+y*blockDim.y)*width+(col+x*blockDim.x)] = red_sum;
}
}}
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//float prod=0;
int row = threadIdx.y; int col = threadIdx.x;
__shared__ float s_w[R*S];
if(row*blockDim.x+col<R*S)
{
s_w[row*blockDim.x+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*blockDim.x+col)];
}
__syncthreads();
for (int y=0; y<2; y++){
for (int x=0; x<2; x++){
float prod = 0;
for (int i=0; i<wt_width; i++){
float3 wt1 = *((float3*)(s_w+i*wt_width)); float3 ip1 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+stride*(col+x*blockDim.x)));
float3 wt2 = *((float3*)(s_w+i*wt_width+3));float3 ip2 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+3+stride*(col+x*blockDim.x)));
float3 wt3 = *((float3*)(s_w+i*wt_width+6));float3 ip3 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+6+stride*(col+x*blockDim.x)));
float3 wt4 = *((float3*)(s_w+i*wt_width+9));float3 ip4 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+9+stride*(col+x*blockDim.x)));
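          // The filter row has 11 taps, so only the x and y lanes of the fourth
          // float3 are accumulated; its z lane lies past the end of this row.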
prod += ip1.x*wt1.x+ip1.y*wt1.y+ip1.z*wt1.z+ip2.x*wt2.x+ip2.y*wt2.y+ip2.z*wt2.z+ip3.x*wt3.x+ip3.y*wt3.y+ip3.z*wt3.z+ip4.x*wt4.x+ip4.y*wt4.y;
__syncthreads();
}
if((row+y*blockDim.y<height)&&(col+x*blockDim.x<width))
{if (prod>=0)
d_o[0*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(row+y*blockDim.y)*width+(col+x*blockDim.x)] += prod;
}
}
}
if(row*blockDim.x+col < R*S)
{
s_w[(row*blockDim.x+col)] = 0;
}
__syncthreads();
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
 /*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/* WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (float)(k+c+d)/255;
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_o,(long int)batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
clock_t cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
clock_t cpu_end = clock();
dim3 dimGrid(batch_size,96,3);
dim3 dimBlock(28,28,1);
//dim3 dimGridRed(batch_size,96,1);
//dim3 dimBlockRed(28,28,1);hipLaunchKernelGGL((
ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3);
hipDeviceSynchronize();
//red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,3,batch_size,96,55,55);
hipMemcpy(OPG,d_o,(long int)batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u,t;
float max_error = 0;
string filename = "layer_1_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
//printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
//printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
//printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
hipFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
| c77b40b27c2fcc86306ce71e39acf14cedddf073.cu | #include <stdio.h>
#include <iostream>
//#include <cooperative_groups.h>
#include <math.h>
#include <sstream>
#include <string.h>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//# <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 3
#define H 227
#define W 227
#define R 11
#define S 11
#define M 96
#define E 55
#define F 55
#define U 4
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt, int height, int width)
{
int row = threadIdx.y; int col = threadIdx.x;
for (int y=0;y<2;y++){
for (int x=0;x<2;x++){
float red_sum = 0;
for(int i=0; i<num_ch; i++)
{ //if((2*row+y<height)&&(2*col+x<width))
red_sum += d_o[i*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(row+y*blockDim.y)*width+(col+x*blockDim.x)] ;
}
if((row+y*blockDim.y<height)&&(col+x*blockDim.x<width))
d_r[blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(row+y*blockDim.y)*width+(col+x*blockDim.x)] = red_sum;
}
}}
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//float prod=0;
int row = threadIdx.y; int col = threadIdx.x;
__shared__ float s_w[R*S];
if(row*blockDim.x+col<R*S)
{
s_w[row*blockDim.x+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*blockDim.x+col)];
}
__syncthreads();
for (int y=0; y<2; y++){
for (int x=0; x<2; x++){
float prod = 0;
for (int i=0; i<wt_width; i++){
float3 wt1 = *((float3*)(s_w+i*wt_width)); float3 ip1 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+stride*(col+x*blockDim.x)));
float3 wt2 = *((float3*)(s_w+i*wt_width+3));float3 ip2 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+3+stride*(col+x*blockDim.x)));
float3 wt3 = *((float3*)(s_w+i*wt_width+6));float3 ip3 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+6+stride*(col+x*blockDim.x)));
float3 wt4 = *((float3*)(s_w+i*wt_width+9));float3 ip4 = *((float3*)(d_i+blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row+y*blockDim.y)+i)*ip_height+9+stride*(col+x*blockDim.x)));
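          // The filter row has 11 taps, so only the x and y lanes of the fourth
          // float3 are accumulated; its z lane lies past the end of this row.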
prod += ip1.x*wt1.x+ip1.y*wt1.y+ip1.z*wt1.z+ip2.x*wt2.x+ip2.y*wt2.y+ip2.z*wt2.z+ip3.x*wt3.x+ip3.y*wt3.y+ip3.z*wt3.z+ip4.x*wt4.x+ip4.y*wt4.y;
__syncthreads();
}
if((row+y*blockDim.y<height)&&(col+x*blockDim.x<width))
{if (prod>=0)
d_o[0*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(row+y*blockDim.y)*width+(col+x*blockDim.x)] += prod;
}
}
}
if(row*blockDim.x+col < R*S)
{
s_w[(row*blockDim.x+col)] = 0;
}
__syncthreads();
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
 /*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/* WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (float)(k+c+d)/255;
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
clock_t cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
clock_t cpu_end = clock();
dim3 dimGrid(batch_size,96,3);
dim3 dimBlock(28,28,1);
//dim3 dimGridRed(batch_size,96,1);
//dim3 dimBlockRed(28,28,1);
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3);
cudaDeviceSynchronize();
//red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,3,batch_size,96,55,55);
cudaMemcpy(OPG,d_o,(long int)batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u,t;
float max_error = 0;
string filename = "layer_1_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
//printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
//printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
//printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
cudaFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
9eac6cef6eca2058615acde8ba5e6de3d9b4b15a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalUpSamplingLinear.cu"
#else
#include "../linear_upsampling.h"
static inline void THNN_(TemporalUpSamplingLinear_shapeCheck)
(THCState *state,
THCTensor *input, THCTensor *gradOutput,
int nBatch, int nChannels,
int inputWidth,
int outputWidth) {
THArgCheck(inputWidth > 0 && outputWidth > 0, 2,
"input and output sizes should be greater than 0,"
" but got input (W: %d) output (W: %d)",
inputWidth, outputWidth);
if (input != NULL) {
THCUNN_argCheck(state, input->nDimension == 3, 2, input,
"3D input tensor expected but got: %s");
}
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 3, 0, nBatch);
THCUNN_check_dim_size(state, gradOutput, 3, 1, nChannels);
THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth);
}
}
void THNN_(TemporalUpSamplingLinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int outputWidth,
bool align_corners)
{
int nbatch = THCTensor_(size)(state, input, 0);
int channels = THCTensor_(size)(state, input, 1);
int inputWidth = THCTensor_(size)(state, input, 2);
THNN_(TemporalUpSamplingLinear_shapeCheck)
(state, input, NULL,
nbatch, channels,
inputWidth, outputWidth);
input = THCTensor_(newContiguous)(state, input);
THCUNN_assertSameGPU(state, 2, input, output);
THCTensor_(resize3d)(state, output,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
outputWidth);
THCTensor_(zero)(state, output);
THCDeviceTensor<real, 3> idata = toDeviceTensor<real, 3>(state, input);
THCDeviceTensor<real, 3> odata = toDeviceTensor<real, 3>(state, output);
THAssert(inputWidth > 0 && outputWidth > 0);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
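  // rwidth maps an output index to a fractional input index; align_corners
  // selects which endpoint convention linear_upsampling_compute_scale uses.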
const int num_kernels = outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
hipStream_t stream = THCState_getCurrentStream(state);
hipLaunchKernelGGL(( caffe_gpu_interp2_kernel<real, accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads) ,
0 , stream, num_kernels, rwidth, align_corners, idata, odata);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
void THNN_(TemporalUpSamplingLinear_updateGradInput)(
THCState *state,
THCTensor *gradOutput,
THCTensor *gradInput,
int nbatch,
int nchannels,
int inputWidth,
int outputWidth,
bool align_corners)
{
THNN_(TemporalUpSamplingLinear_shapeCheck)
(state, NULL, gradOutput,
nbatch, nchannels,
inputWidth, outputWidth);
gradInput = THCTensor_(newContiguous)(state, gradInput);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
THCTensor_(resize3d)(state, gradInput, nbatch, nchannels, inputWidth);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 3> data1 = toDeviceTensor<real, 3>(state, gradInput);
THCDeviceTensor<real, 3> data2 = toDeviceTensor<real, 3>(state, gradOutput);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
const int num_kernels = outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
hipStream_t stream = THCState_getCurrentStream(state);
hipLaunchKernelGGL(( caffe_gpu_interp2_kernel_backward<real ,accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)),
dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, data1, data2);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, gradInput);
THCTensor_(free)(state, gradOutput);
}
#endif
| 9eac6cef6eca2058615acde8ba5e6de3d9b4b15a.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalUpSamplingLinear.cu"
#else
#include "../linear_upsampling.h"
static inline void THNN_(TemporalUpSamplingLinear_shapeCheck)
(THCState *state,
THCTensor *input, THCTensor *gradOutput,
int nBatch, int nChannels,
int inputWidth,
int outputWidth) {
THArgCheck(inputWidth > 0 && outputWidth > 0, 2,
"input and output sizes should be greater than 0,"
" but got input (W: %d) output (W: %d)",
inputWidth, outputWidth);
if (input != NULL) {
THCUNN_argCheck(state, input->nDimension == 3, 2, input,
"3D input tensor expected but got: %s");
}
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 3, 0, nBatch);
THCUNN_check_dim_size(state, gradOutput, 3, 1, nChannels);
THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth);
}
}
void THNN_(TemporalUpSamplingLinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int outputWidth,
bool align_corners)
{
int nbatch = THCTensor_(size)(state, input, 0);
int channels = THCTensor_(size)(state, input, 1);
int inputWidth = THCTensor_(size)(state, input, 2);
THNN_(TemporalUpSamplingLinear_shapeCheck)
(state, input, NULL,
nbatch, channels,
inputWidth, outputWidth);
input = THCTensor_(newContiguous)(state, input);
THCUNN_assertSameGPU(state, 2, input, output);
THCTensor_(resize3d)(state, output,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
outputWidth);
THCTensor_(zero)(state, output);
THCDeviceTensor<real, 3> idata = toDeviceTensor<real, 3>(state, input);
THCDeviceTensor<real, 3> odata = toDeviceTensor<real, 3>(state, output);
THAssert(inputWidth > 0 && outputWidth > 0);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
const int num_kernels = outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
cudaStream_t stream = THCState_getCurrentStream(state);
caffe_gpu_interp2_kernel<real, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads ,
0 , stream>>>(num_kernels, rwidth, align_corners, idata, odata);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
void THNN_(TemporalUpSamplingLinear_updateGradInput)(
THCState *state,
THCTensor *gradOutput,
THCTensor *gradInput,
int nbatch,
int nchannels,
int inputWidth,
int outputWidth,
bool align_corners)
{
THNN_(TemporalUpSamplingLinear_shapeCheck)
(state, NULL, gradOutput,
nbatch, nchannels,
inputWidth, outputWidth);
gradInput = THCTensor_(newContiguous)(state, gradInput);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
THCTensor_(resize3d)(state, gradInput, nbatch, nchannels, inputWidth);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 3> data1 = toDeviceTensor<real, 3>(state, gradInput);
THCDeviceTensor<real, 3> data2 = toDeviceTensor<real, 3>(state, gradOutput);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
const int num_kernels = outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
cudaStream_t stream = THCState_getCurrentStream(state);
caffe_gpu_interp2_kernel_backward<real ,accreal> <<<THCCeilDiv(num_kernels, num_threads),
num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, data1, data2);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradInput);
THCTensor_(free)(state, gradOutput);
}
#endif
|
98109146bc35d1cde42dc1af00132fe8369a1406.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
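// Gradient note for the kernel above: the expression implements the
// leaky-ReLU derivative, d(out)/d(in) = 1 for in > 0 and negative_slope
// for in <= 0, so each out_diff element is in_diff either passed through
// unchanged or scaled by negative_slope.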
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = (*bottom)[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_CLASS(ReLULayer);
} // namespace caffe | 98109146bc35d1cde42dc1af00132fe8369a1406.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
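// Gradient note for the kernel above: the expression implements the
// leaky-ReLU derivative, d(out)/d(in) = 1 for in > 0 and negative_slope
// for in <= 0, so each out_diff element is in_diff either passed through
// unchanged or scaled by negative_slope.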
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = (*bottom)[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_CLASS(ReLULayer);
} // namespace caffe |
67a8813190188b88fefb9ba846a2b79dfe92e135.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* dropc dev implementation
*
*/
#include <dropc/dropc_dev.hpp>
#include <cassert>
#include <dropc/cuda_common.hpp>
#define USE_TEXTURE_CACHE
//#define FCDROPC_BLK_SIZE 16
#define FCDROPC_BLK_SIZE 8
// texture reference
#ifdef USE_TEXTURE_CACHE
texture<float,hipTextureType1D,hipReadModeElementType> texMaskWeights;
texture<float,hipTextureType1D,hipReadModeElementType> texMaskBiases;
#endif
// FCDrop connection fprop kernel
__global__ void kFCDropC_fprop(
const float* x, ///<[in] input matrix x, col major, numData x inDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
const float* b, ///<[in] bias matrix, row major, 1 x outDim
int m, ///<[in] output dimension
int n, ///<[in] input dimension
int d, ///<[in] number of data in this batch
#ifndef USE_TEXTURE_CACHE
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
const float* mb, ///<[in] maskBiases, col major, dataDim x outDim
#endif
float* y ///<[in,out] target matrix y, col major, dataDim x outDim
){
// bx,by,tx,ty
int bx = blockIdx.x * FCDROPC_BLK_SIZE;
int by = blockIdx.y * FCDROPC_BLK_SIZE;
int tx = threadIdx.x;
int ty = threadIdx.y;
// define shared memory
__shared__ float sA[FCDROPC_BLK_SIZE][FCDROPC_BLK_SIZE];
__shared__ float sB[FCDROPC_BLK_SIZE][FCDROPC_BLK_SIZE];
float c = 0;
if( (bx+tx) < m && (by + ty) < d ) {
// get old y value
c = y[ (bx+tx)*d + (by+ty) ];
}
//loop over cols of x and rows of w
for( int i = 0; i < n; i += FCDROPC_BLK_SIZE ) {
// load value from x, w into shared memory and sync
if( (i+tx) < n && (by+ty) < d )
sA[ty][tx] = x[(i+tx)*d + (by+ty)];
else
sA[ty][tx] = 0.0f;
if( (i+ty) < n && (bx+tx) < m )
sB[ty][tx] = w[(tx+bx)*n + (i+ty)];
else
sB[ty][tx] = 0.0f;
__syncthreads();
// inc c value
if( (bx+tx) < m && (by + ty) < d ) {
#pragma unroll
for( int j = 0; j < FCDROPC_BLK_SIZE; j++ ) {
float maskW = 0.0f;
if( (i+j) < n ) {
// get m row: (i+j), col: (by+ty)^th matrix of col bx+tx
size_t maskW_index = size_t(m)*n*(by+ty)+(bx+tx)*n + (i+j);
#ifdef USE_TEXTURE_CACHE
// only hipArray can use tex1D which is faster than tex1Dfetch
//maskW = tex1D( texMaskWeights, maskW_index );
maskW = tex1Dfetch( texMaskWeights, maskW_index );
#else
maskW = mw[ maskW_index ];
#endif
}
c += sA[ty][j] * sB[j][tx] * maskW;
}
}
__syncthreads();
}
if( (bx+tx) < m && (by + ty) < d ) {
#ifdef USE_TEXTURE_CACHE
float maskB = tex1Dfetch( texMaskBiases, (tx+bx)*d+(ty+by) );
#else
float maskB = mb[(tx+bx)*d+(ty+by)];
#endif
// inc c value by bias
c += b[tx+bx] * maskB;
y[ (bx+tx)*d + (by+ty) ] = c;
}
}
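// Index layout used by the mask lookups above: maskWeights holds one
// inDim x outDim column-major mask per data item, stored back to back,
// so for data item k, output column j (0..m-1) and input row i (0..n-1)
// the element sits at size_t(m)*n*k + j*n + i. For example, with m = 2
// and n = 3, the entry for data item 1, output 0, input 2 is at index
// 2*3*1 + 0*3 + 2 = 8. maskBiases is a column-major dataDim x outDim
// matrix, indexed as j*d + k in the same way.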
// FCDrop connection bprop act kernel
__global__ void kFCDropC_bpropa(
const float* v, ///<[in] bprop act from previous layer, col major,numData x outDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
int m, ///<[in] output dimension
int n, ///<[in] input dimension
int d, ///<[in] number of data in this batch
float scale_g, ///<[in] input gradient scale
#ifndef USE_TEXTURE_CACHE
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
#endif
float* da, ///<[in,out] d-active, col major, numData x inDim
float scale_da ///<[in] da scale
){
// bx,by,tx,ty
int bx = blockIdx.x * FCDROPC_BLK_SIZE;
int by = blockIdx.y * FCDROPC_BLK_SIZE;
int tx = threadIdx.x;
int ty = threadIdx.y;
// define shared memory
__shared__ float sA[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
__shared__ float sB[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
float prev_da = 0;
if( (bx+tx) < n && (by+ty)<d ) {
// get old value
prev_da = da[ (bx+tx)*d + (by+ty) ];
}
float c = 0;
//loop over cols of v and rows of w^T(cols of w)
for( int i = 0; i < m; i+= FCDROPC_BLK_SIZE ) {
// load value from v and wt into shared memory and sync
if( (i+tx) < m && (by+ty) < d )
sA[ty][tx] = v[ (i+tx)*d + (by+ty) ];
else
sA[ty][tx] = 0.0f;
if( (i+ty) < m && (tx+bx) < n ){
// wt row: i+ty col: tx+bx
sB[ty][tx] = w[ (i+ty)*n + (tx+bx) ];
}
else
sB[ty][tx] = 0.0f;
__syncthreads();
// inc c value
if( (bx+tx) < n && (by+ty)<d ) {
#pragma unroll
for( int j = 0; j < FCDROPC_BLK_SIZE; j++ ) {
float maskW = 0.0f;
if( (i+j) < m ) {
size_t maskW_index = size_t(m)*n*(ty+by)+(i+j)*n+(tx+bx);
#ifdef USE_TEXTURE_CACHE
// only hipArray can use tex1D which is faster than tex1Dfetch
//maskW = tex1D( texMaskWeights, maskW_index );
maskW = tex1Dfetch( texMaskWeights, maskW_index );
#else
maskW = mw[ maskW_index ];
#endif
}
c += sA[ty][j] * sB[j][tx] * maskW;
}
}
__syncthreads();
}
// set output data
if( (bx+tx) < n && (by+ty)<d ) {
da[ (bx+tx)*d + (by+ty) ] = prev_da * scale_da + scale_g * c;
}
}
//FCDrop connection bprop weights kernel
__global__ void kFCDropC_bpropw(
const float* a, ///<[in] prev activation matrix, col major, numData x inDim
const float* v, ///<[in] gradient matrix, col major, numData x outDim
int m, ///<[in] output dimension
int n, ///<[in] input dimension
int d, ///<[in] number of data in this batch
float scale_g, ///<[in] inc scale
#ifndef USE_TEXTURE_CACHE
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
#endif
float* dw, ///<[in,out] w gradient, col major, inDim x outDim
float scale_dw ///<[in] gradient scale
){
// bx,by,tx,ty
int bx = blockIdx.x * FCDROPC_BLK_SIZE;
int by = blockIdx.y * FCDROPC_BLK_SIZE;
int tx = threadIdx.x;
int ty = threadIdx.y;
// define shared memory
__shared__ float sA[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
__shared__ float sB[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
float prev_dw = 0;
if( (bx+tx) < m && (by+ty) < n ) {
// get the old value
prev_dw = dw[ (bx+tx)*n + (by+ty) ];
}
float c = 0;
// loop over cols of a^T(rows of a) and rows of v
for( int i = 0; i < d; i += FCDROPC_BLK_SIZE ) {
// load value from at and v into shared memory and sync
if( (ty+by) < n && (i+tx) < d )
sA[ty][tx] = a[ (by+ty)*d + (i+tx) ];
else
sA[ty][tx] = 0.0f;
if( (tx+bx) < m && (i+ty) < d )
sB[ty][tx] = v[ (bx+tx)*d + (i+ty) ];
else
sB[ty][tx] = 0.0f;
__syncthreads();
// inc c value
if( (bx+tx) < m && (by+ty) < n ) {
#pragma unroll
for( int j = 0; j < FCDROPC_BLK_SIZE; j++ ) {
float maskW = 0.0f;
if( (i+j) < d ) {
size_t maskW_index = size_t(m)*n*(i+j)+(bx+tx)*n+(by+ty);
#ifdef USE_TEXTURE_CACHE
// only hipArray can use tex1D which is faster than tex1Dfetch
//maskW = tex1D( texMaskWeights, maskW_index );
maskW = tex1Dfetch( texMaskWeights, maskW_index );
#else
maskW = mw[ maskW_index ];
#endif
}
c += sA[ty][j] * sB[j][tx] * maskW;
}
}
__syncthreads();
}
// set output data
if( (bx+tx) < m && (by+ty) < n ) {
// get the old value
dw[ (bx+tx)*n + (by+ty) ] = prev_dw * scale_dw + scale_g * c;
}
}
//----------------------------------------------------
// main entry point: dropc_dev.cu
//----------------------------------------------------
void computeFCDropC_fprop_d(
const float* x, ///<[in] input matrix x, col major, numData x inDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
const float* b, ///<[in] bias matrix, row major, 1 x outDim
int outDim, ///<[in] output dimension
int inDim, ///<[in] input dimension
int numData, ///<[in] number of data in this batch
const float * mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
const float * mb, ///<[in] maskBiases, col major, dataDim x outDim
float * y ///<[in,out] target matrix y, col major, dataDim x outDim
){
#ifdef USE_TEXTURE_CACHE
// bind texture for maskWeights
hipChannelFormatDesc channelDescFloat = hipCreateChannelDesc<float>();
size_t offset_w;
checkCuda( hipBindTexture( &offset_w, &texMaskWeights, mw,
&channelDescFloat, sizeof(float) * inDim * outDim * numData ) );
assert( offset_w == 0 );
// set clamp,point mode
texMaskWeights.addressMode[0] = hipAddressModeClamp;
texMaskWeights.filterMode = hipFilterModePoint;
texMaskWeights.normalized= false;
// bind texture for maskBiases
size_t offset_b;
checkCuda( hipBindTexture( &offset_b, &texMaskBiases, mb,
&channelDescFloat, sizeof(float) * numData * outDim ) );
assert( offset_b == 0 );
// set clamp,point mode
texMaskBiases.addressMode[0] = hipAddressModeClamp;
texMaskBiases.filterMode = hipFilterModePoint;
texMaskBiases.normalized= false;
#else
checkCuda( hipDeviceSetCacheConfig( hipFuncCachePreferL1 ) );
#endif
// define block/thread info
dim3 threads( FCDROPC_BLK_SIZE, FCDROPC_BLK_SIZE );
dim3 blocks( divup(outDim,FCDROPC_BLK_SIZE), divup(numData, FCDROPC_BLK_SIZE) );
// invoke kernel
hipLaunchKernelGGL(( kFCDropC_fprop), dim3(blocks),dim3(threads), 0, 0, x, w, b,
outDim, inDim, numData,
#ifndef USE_TEXTURE_CACHE
mw, mb,
#endif
y );
// check error
checkLastCudaError();
// unbind texture
#ifdef USE_TEXTURE_CACHE
checkCuda( hipUnbindTexture( &texMaskWeights ) );
checkCuda( hipUnbindTexture( &texMaskBiases ) );
#endif
}
void computeFCDropC_bpropActs_d(
const float* v, ///<[in] bprop act from previous layer, col major,numData x outDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
int outDim, ///<[in] output dimension
int inDim, ///<[in] input dimension
int numData, ///<[in] number of data in this batch
float scale_g, ///<[in] input gradient scale
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
float* da, ///<[in,out] d-active, col major, numData x inDim
float scale_da ///<[in] da scale
){
#ifdef USE_TEXTURE_CACHE
// bind texture for maskWeights
hipChannelFormatDesc channelDescFloat = hipCreateChannelDesc<float>();
size_t offset_w;
checkCuda( hipBindTexture( &offset_w, &texMaskWeights, mw,
&channelDescFloat, sizeof(float) * inDim * outDim * numData ) );
assert( offset_w == 0 );
// set clamp,point mode
texMaskWeights.addressMode[0] = hipAddressModeClamp;
texMaskWeights.filterMode = hipFilterModePoint;
texMaskWeights.normalized= false;
#else
checkCuda( hipDeviceSetCacheConfig( hipFuncCachePreferL1 ) );
#endif
// define block/thread info
dim3 threads( FCDROPC_BLK_SIZE, FCDROPC_BLK_SIZE );
dim3 blocks( divup(inDim,FCDROPC_BLK_SIZE), divup(numData, FCDROPC_BLK_SIZE) );
// invoke kernel
hipLaunchKernelGGL(( kFCDropC_bpropa), dim3(blocks),dim3(threads), 0, 0, v, w,
outDim, inDim, numData,
scale_g,
#ifndef USE_TEXTURE_CACHE
mw,
#endif
da, scale_da );
// check error
checkLastCudaError();
// unbind texture
#ifdef USE_TEXTURE_CACHE
checkCuda( hipUnbindTexture( &texMaskWeights ) );
checkCuda( hipUnbindTexture( &texMaskBiases ) );
#endif
}
void computeFCDropC_bpropWeights_d(
const float* a, ///<[in] prev activation matrix, col major, numData x inDim
const float* v, ///<[in] gradient matrix, col major, numData x outDim
int outDim, ///<[in] output dimension
int inDim, ///<[in] input dimension
int numData, ///<[in] number of data in this batch
float scale_g, ///<[in] inc scale
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
float* dw, ///<[in,out] w gradient, col major, inDim x outDim
float scale_dw ///<[in] gradient scale
){
#ifdef USE_TEXTURE_CACHE
// bind texture for maskWeights
hipChannelFormatDesc channelDescFloat = hipCreateChannelDesc<float>();
size_t offset_w;
checkCuda( hipBindTexture( &offset_w, &texMaskWeights, mw,
&channelDescFloat, sizeof(float) * inDim * outDim * numData ) );
assert( offset_w == 0 );
// set clamp,point mode
texMaskWeights.addressMode[0] = hipAddressModeClamp;
texMaskWeights.filterMode = hipFilterModePoint;
texMaskWeights.normalized= false;
#else
checkCuda( hipDeviceSetCacheConfig( hipFuncCachePreferL1 ) );
#endif
// define block/thread info
dim3 threads( FCDROPC_BLK_SIZE, FCDROPC_BLK_SIZE );
dim3 blocks( divup(outDim,FCDROPC_BLK_SIZE), divup(inDim, FCDROPC_BLK_SIZE) );
// invoke kernel
hipLaunchKernelGGL(( kFCDropC_bpropw), dim3(blocks),dim3(threads), 0, 0, a, v,
outDim, inDim, numData,
scale_g,
#ifndef USE_TEXTURE_CACHE
mw,
#endif
dw, scale_dw );
// check error
checkLastCudaError();
// unbind texture
#ifdef USE_TEXTURE_CACHE
checkCuda( hipUnbindTexture( &texMaskWeights ) );
checkCuda( hipUnbindTexture( &texMaskBiases ) );
#endif
}
| 67a8813190188b88fefb9ba846a2b79dfe92e135.cu | /**
* dropc dev implementation
*
*/
#include <dropc/dropc_dev.hpp>
#include <cassert>
#include <dropc/cuda_common.hpp>
#define USE_TEXTURE_CACHE
//#define FCDROPC_BLK_SIZE 16
#define FCDROPC_BLK_SIZE 8
// texture reference
#ifdef USE_TEXTURE_CACHE
texture<float,cudaTextureType1D,cudaReadModeElementType> texMaskWeights;
texture<float,cudaTextureType1D,cudaReadModeElementType> texMaskBiases;
#endif
// FCDrop connection fprop kernel
__global__ void kFCDropC_fprop(
const float* x, ///<[in] input matrix x, col major, numData x inDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
const float* b, ///<[in] bias matrix, row major, 1 x outDim
int m, ///<[in] output dimension
int n, ///<[in] input dimension
int d, ///<[in] number of data in this batch
#ifndef USE_TEXTURE_CACHE
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
const float* mb, ///<[in] maskBiases, col major, dataDim x outDim
#endif
float* y ///<[in,out] target matrix y, col major, dataDim x outDim
){
// bx,by,tx,ty
int bx = blockIdx.x * FCDROPC_BLK_SIZE;
int by = blockIdx.y * FCDROPC_BLK_SIZE;
int tx = threadIdx.x;
int ty = threadIdx.y;
// define shared memory
__shared__ float sA[FCDROPC_BLK_SIZE][FCDROPC_BLK_SIZE];
__shared__ float sB[FCDROPC_BLK_SIZE][FCDROPC_BLK_SIZE];
float c = 0;
if( (bx+tx) < m && (by + ty) < d ) {
// get old y value
c = y[ (bx+tx)*d + (by+ty) ];
}
//loop over cols of x and rows of w
for( int i = 0; i < n; i += FCDROPC_BLK_SIZE ) {
// load value from x, w into shared memory and sync
if( (i+tx) < n && (by+ty) < d )
sA[ty][tx] = x[(i+tx)*d + (by+ty)];
else
sA[ty][tx] = 0.0f;
if( (i+ty) < n && (bx+tx) < m )
sB[ty][tx] = w[(tx+bx)*n + (i+ty)];
else
sB[ty][tx] = 0.0f;
__syncthreads();
// inc c value
if( (bx+tx) < m && (by + ty) < d ) {
#pragma unroll
for( int j = 0; j < FCDROPC_BLK_SIZE; j++ ) {
float maskW = 0.0f;
if( (i+j) < n ) {
// get m row: (i+j), col: (by+ty)^th matrix of col bx+tx
size_t maskW_index = size_t(m)*n*(by+ty)+(bx+tx)*n + (i+j);
#ifdef USE_TEXTURE_CACHE
// only cudaArray can use tex1D which is faster than tex1Dfetch
//maskW = tex1D( texMaskWeights, maskW_index );
maskW = tex1Dfetch( texMaskWeights, maskW_index );
#else
maskW = mw[ maskW_index ];
#endif
}
c += sA[ty][j] * sB[j][tx] * maskW;
}
}
__syncthreads();
}
if( (bx+tx) < m && (by + ty) < d ) {
#ifdef USE_TEXTURE_CACHE
float maskB = tex1Dfetch( texMaskBiases, (tx+bx)*d+(ty+by) );
#else
float maskB = mb[(tx+bx)*d+(ty+by)];
#endif
// inc c value by bias
c += b[tx+bx] * maskB;
y[ (bx+tx)*d + (by+ty) ] = c;
}
}
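// Index layout used by the mask lookups above: maskWeights holds one
// inDim x outDim column-major mask per data item, stored back to back,
// so for data item k, output column j (0..m-1) and input row i (0..n-1)
// the element sits at size_t(m)*n*k + j*n + i. For example, with m = 2
// and n = 3, the entry for data item 1, output 0, input 2 is at index
// 2*3*1 + 0*3 + 2 = 8. maskBiases is a column-major dataDim x outDim
// matrix, indexed as j*d + k in the same way.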
// FCDrop connection bprop act kernel
__global__ void kFCDropC_bpropa(
const float* v, ///<[in] bprop act from previous layer, col major,numData x outDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
int m, ///<[in] output dimension
int n, ///<[in] input dimension
int d, ///<[in] number of data in this batch
float scale_g, ///<[in] input gradient scale
#ifndef USE_TEXTURE_CACHE
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
#endif
float* da, ///<[in,out] d-active, col major, numData x inDim
float scale_da ///<[in] da scale
){
// bx,by,tx,ty
int bx = blockIdx.x * FCDROPC_BLK_SIZE;
int by = blockIdx.y * FCDROPC_BLK_SIZE;
int tx = threadIdx.x;
int ty = threadIdx.y;
// define shared memory
__shared__ float sA[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
__shared__ float sB[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
float prev_da = 0;
if( (bx+tx) < n && (by+ty)<d ) {
// get old value
prev_da = da[ (bx+tx)*d + (by+ty) ];
}
float c = 0;
//loop over cols of v and rows of w^T(cols of w)
for( int i = 0; i < m; i+= FCDROPC_BLK_SIZE ) {
// load value from v and wt into shared memory and sync
if( (i+tx) < m && (by+ty) < d )
sA[ty][tx] = v[ (i+tx)*d + (by+ty) ];
else
sA[ty][tx] = 0.0f;
if( (i+ty) < m && (tx+bx) < n ){
// wt row: i+ty col: tx+bx
sB[ty][tx] = w[ (i+ty)*n + (tx+bx) ];
}
else
sB[ty][tx] = 0.0f;
__syncthreads();
// inc c value
if( (bx+tx) < n && (by+ty)<d ) {
#pragma unroll
for( int j = 0; j < FCDROPC_BLK_SIZE; j++ ) {
float maskW = 0.0f;
if( (i+j) < m ) {
size_t maskW_index = size_t(m)*n*(ty+by)+(i+j)*n+(tx+bx);
#ifdef USE_TEXTURE_CACHE
// only cudaArray can use tex1D which is faster than tex1Dfetch
//maskW = tex1D( texMaskWeights, maskW_index );
maskW = tex1Dfetch( texMaskWeights, maskW_index );
#else
maskW = mw[ maskW_index ];
#endif
}
c += sA[ty][j] * sB[j][tx] * maskW;
}
}
__syncthreads();
}
// set output data
if( (bx+tx) < n && (by+ty)<d ) {
da[ (bx+tx)*d + (by+ty) ] = prev_da * scale_da + scale_g * c;
}
}
//FCDrop connection bprop weights kernel
__global__ void kFCDropC_bpropw(
const float* a, ///<[in] prev activation matrix, col major, numData x inDim
const float* v, ///<[in] gradient matrix, col major, numData x outDim
int m, ///<[in] output dimension
int n, ///<[in] input dimension
int d, ///<[in] number of data in this batch
float scale_g, ///<[in] inc scale
#ifndef USE_TEXTURE_CACHE
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
#endif
float* dw, ///<[in,out] w gradient, col major, inDim x outDim
float scale_dw ///<[in] gradient scale
){
// bx,by,tx,ty
int bx = blockIdx.x * FCDROPC_BLK_SIZE;
int by = blockIdx.y * FCDROPC_BLK_SIZE;
int tx = threadIdx.x;
int ty = threadIdx.y;
// define shared memory
__shared__ float sA[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
__shared__ float sB[FCDROPC_BLK_SIZE+1][FCDROPC_BLK_SIZE+1];
float prev_dw = 0;
if( (bx+tx) < m && (by+ty) < n ) {
// get the old value
prev_dw = dw[ (bx+tx)*n + (by+ty) ];
}
float c = 0;
// loop over cols of a^T(rows of a) and rows of v
for( int i = 0; i < d; i += FCDROPC_BLK_SIZE ) {
// load value from at and v into shared memory and sync
if( (ty+by) < n && (i+tx) < d )
sA[ty][tx] = a[ (by+ty)*d + (i+tx) ];
else
sA[ty][tx] = 0.0f;
if( (tx+bx) < m && (i+ty) < d )
sB[ty][tx] = v[ (bx+tx)*d + (i+ty) ];
else
sB[ty][tx] = 0.0f;
__syncthreads();
// inc c value
if( (bx+tx) < m && (by+ty) < n ) {
#pragma unroll
for( int j = 0; j < FCDROPC_BLK_SIZE; j++ ) {
float maskW = 0.0f;
if( (i+j) < d ) {
size_t maskW_index = size_t(m)*n*(i+j)+(bx+tx)*n+(by+ty);
#ifdef USE_TEXTURE_CACHE
// only cudaArray can use tex1D which is faster than tex1Dfetch
//maskW = tex1D( texMaskWeights, maskW_index );
maskW = tex1Dfetch( texMaskWeights, maskW_index );
#else
maskW = mw[ maskW_index ];
#endif
}
c += sA[ty][j] * sB[j][tx] * maskW;
}
}
__syncthreads();
}
// set output data
if( (bx+tx) < m && (by+ty) < n ) {
// get the old value
dw[ (bx+tx)*n + (by+ty) ] = prev_dw * scale_dw + scale_g * c;
}
}
//----------------------------------------------------
// main entry point: dropc_dev.cu
//----------------------------------------------------
void computeFCDropC_fprop_d(
const float* x, ///<[in] input matrix x, col major, numData x inDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
const float* b, ///<[in] bias matrix, row major, 1 x outDim
int outDim, ///<[in] output dimension
int inDim, ///<[in] input dimension
int numData, ///<[in] number of data in this batch
const float * mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
const float * mb, ///<[in] maskBiases, col major, dataDim x outDim
float * y ///<[in,out] target matrix y, col major, dataDim x outDim
){
#ifdef USE_TEXTURE_CACHE
// bind texture for maskWeights
cudaChannelFormatDesc channelDescFloat = cudaCreateChannelDesc<float>();
size_t offset_w;
checkCuda( cudaBindTexture( &offset_w, &texMaskWeights, mw,
&channelDescFloat, sizeof(float) * inDim * outDim * numData ) );
assert( offset_w == 0 );
// set clamp,point mode
texMaskWeights.addressMode[0] = cudaAddressModeClamp;
texMaskWeights.filterMode = cudaFilterModePoint;
texMaskWeights.normalized= false;
// bind texture for maskBiases
size_t offset_b;
checkCuda( cudaBindTexture( &offset_b, &texMaskBiases, mb,
&channelDescFloat, sizeof(float) * numData * outDim ) );
assert( offset_b == 0 );
// set clamp,point mode
texMaskBiases.addressMode[0] = cudaAddressModeClamp;
texMaskBiases.filterMode = cudaFilterModePoint;
texMaskBiases.normalized= false;
#else
checkCuda( cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ) );
#endif
// define block/thread info
dim3 threads( FCDROPC_BLK_SIZE, FCDROPC_BLK_SIZE );
dim3 blocks( divup(outDim,FCDROPC_BLK_SIZE), divup(numData, FCDROPC_BLK_SIZE) );
// invoke kernel
kFCDropC_fprop<<<blocks,threads>>>( x, w, b,
outDim, inDim, numData,
#ifndef USE_TEXTURE_CACHE
mw, mb,
#endif
y );
// check error
checkLastCudaError();
// unbind texture
#ifdef USE_TEXTURE_CACHE
checkCuda( cudaUnbindTexture( &texMaskWeights ) );
checkCuda( cudaUnbindTexture( &texMaskBiases ) );
#endif
}
void computeFCDropC_bpropActs_d(
const float* v, ///<[in] bprop act from previous layer, col major,numData x outDim
const float* w, ///<[in] weight matrix w, col major, inDim x outDim
int outDim, ///<[in] output dimension
int inDim, ///<[in] input dimension
int numData, ///<[in] number of data in this batch
float scale_g, ///<[in] input gradient scale
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
float* da, ///<[in,out] d-active, col major, numData x inDim
float scale_da ///<[in] da scale
){
#ifdef USE_TEXTURE_CACHE
// bind texture for maskWeights
cudaChannelFormatDesc channelDescFloat = cudaCreateChannelDesc<float>();
size_t offset_w;
checkCuda( cudaBindTexture( &offset_w, &texMaskWeights, mw,
&channelDescFloat, sizeof(float) * inDim * outDim * numData ) );
assert( offset_w == 0 );
// set clamp,point mode
texMaskWeights.addressMode[0] = cudaAddressModeClamp;
texMaskWeights.filterMode = cudaFilterModePoint;
texMaskWeights.normalized= false;
#else
checkCuda( cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ) );
#endif
// define block/thread info
dim3 threads( FCDROPC_BLK_SIZE, FCDROPC_BLK_SIZE );
dim3 blocks( divup(inDim,FCDROPC_BLK_SIZE), divup(numData, FCDROPC_BLK_SIZE) );
// invoke kernel
kFCDropC_bpropa<<<blocks,threads>>>( v, w,
outDim, inDim, numData,
scale_g,
#ifndef USE_TEXTURE_CACHE
mw,
#endif
da, scale_da );
// check error
checkLastCudaError();
// unbind texture
#ifdef USE_TEXTURE_CACHE
checkCuda( cudaUnbindTexture( &texMaskWeights ) );
checkCuda( cudaUnbindTexture( &texMaskBiases ) );
#endif
}
void computeFCDropC_bpropWeights_d(
const float* a, ///<[in] prev activation matrix, col major, numData x inDim
const float* v, ///<[in] gradient matrix, col major, numData x outDim
int outDim, ///<[in] output dimension
int inDim, ///<[in] input dimension
int numData, ///<[in] number of data in this batch
float scale_g, ///<[in] inc scale
const float* mw, ///<[in] maskWeights, col major, inDim x (outDimxdataDim)
float* dw, ///<[in,out] w gradient, col major, inDim x outDim
float scale_dw ///<[in] gradient scale
){
#ifdef USE_TEXTURE_CACHE
// bind texture for maskWeights
cudaChannelFormatDesc channelDescFloat = cudaCreateChannelDesc<float>();
size_t offset_w;
checkCuda( cudaBindTexture( &offset_w, &texMaskWeights, mw,
&channelDescFloat, sizeof(float) * inDim * outDim * numData ) );
assert( offset_w == 0 );
// set clamp,point mode
texMaskWeights.addressMode[0] = cudaAddressModeClamp;
texMaskWeights.filterMode = cudaFilterModePoint;
texMaskWeights.normalized= false;
#else
checkCuda( cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ) );
#endif
// define block/thread info
dim3 threads( FCDROPC_BLK_SIZE, FCDROPC_BLK_SIZE );
dim3 blocks( divup(outDim,FCDROPC_BLK_SIZE), divup(inDim, FCDROPC_BLK_SIZE) );
// invoke kernel
kFCDropC_bpropw<<<blocks,threads>>>( a, v,
outDim, inDim, numData,
scale_g,
#ifndef USE_TEXTURE_CACHE
mw,
#endif
dw, scale_dw );
// check error
checkLastCudaError();
// unbind texture
#ifdef USE_TEXTURE_CACHE
checkCuda( cudaUnbindTexture( &texMaskWeights ) );
checkCuda( cudaUnbindTexture( &texMaskBiases ) );
#endif
}
|
bebf7249f23bae057d1b9e7084c1d08f3b16c5d6.hip | // !!! This is a file automatically generated by hipify!!!
/* ljForce.cu */
#include "ljForce.h"
//#include <hip/hip_runtime.h>
__global__ void ljForce(float *a, float *b)
{
//does nothing for now
}
extern "C" void lunch_ljForce_kernel(real_t *pot, int nLocalBoxes, int *nAtoms, int *gridSize, int *gid, real3 *r, real_t *U, real3 *f, int sz)
{
float *A_d;
float *B_d;
dim3 Dimgrid(1,1,1);
dim3 Dimblock(512,1,1);
hipMalloc((void**)&A_d, sizeof(float));
hipMalloc((void**)&B_d, sizeof(float));
hipLaunchKernelGGL(( ljForce), dim3(Dimgrid), dim3(Dimblock), 0, 0, A_d, B_d);
hipFree(A_d);
hipFree(B_d);
}
| bebf7249f23bae057d1b9e7084c1d08f3b16c5d6.cu | /* ljForce.cu */
#include "ljForce.h"
//#include <cuda_runtime.h>
__global__ void ljForce(float *a, float *b)
{
//does nothing for now
}
extern "C" void lunch_ljForce_kernel(real_t *pot, int nLocalBoxes, int *nAtoms, int *gridSize, int *gid, real3 *r, real_t *U, real3 *f, int sz)
{
float *A_d;
float *B_d;
dim3 Dimgrid(1,1,1);
dim3 Dimblock(512,1,1);
cudaMalloc((void**)&A_d, sizeof(float));
cudaMalloc((void**)&B_d, sizeof(float));
ljForce<<<Dimgrid, Dimblock>>>(A_d, B_d);
cudaFree(A_d);
cudaFree(B_d);
}
|
ecfc07e2d567026a5083f515d72384064aabafe3.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA triangle counting kernel
// Topological-Driven: one node per thread, thread_centric,
// use atomicAdd instruction
//
// Reference:
// T. Schank. Algorithmic Aspects of Triangle-Based Network Analysis
//=================================================================//
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define STACK_SZ 2048
#define SEED 123
__device__ void quicksort(uint64_t array[], unsigned len)
{
uint64_t left = 0, stack[STACK_SZ], pos = 0, seed = SEED;
for ( ; ; ) /* outer loop */
{
for (; left+1 < len; len++) /* sort left to len-1 */
{
if (pos == STACK_SZ) len = stack[pos = 0]; /* stack overflow, reset */
uint64_t pivot = array[left+seed%(len-left)]; /* pick random pivot */
seed = seed*69069+1; /* next pseudorandom number */
stack[pos++] = len; /* sort right part later */
for (unsigned right = left-1; ; ) /* inner loop: partitioning */
{
while (array[++right] < pivot); /* look for greater element */
while (pivot < array[--len]); /* look for smaller element */
if (right >= len) break; /* partition point found? */
uint64_t temp = array[right];
array[right] = array[len]; /* the only swap */
array[len] = temp;
} /* partitioned, continue left part */
}
if (pos == 0) break; /* stack empty? */
left = len; /* left to right is sorted */
len = stack[--pos]; /* get next range to sort */
}
}
__device__ unsigned get_intersect_cnt(uint64_t* setA, unsigned sizeA, uint64_t* setB, unsigned sizeB)
{
unsigned ret=0;
unsigned iter1=0, iter2=0;
while (iter1<sizeA && iter2<sizeB)
{
if (setA[iter1] < setB[iter2])
iter1++;
else if (setA[iter1] > setB[iter2])
iter2++;
else
{
ret++;
iter1++;
iter2++;
}
}
return ret;
}
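// Example: for sorted neighbour lists setA = {1, 3, 5} and setB = {3, 4, 5}
// the merge above scans each list once and returns 2 (common elements 3
// and 5). Both inputs must already be sorted ascending, which is what
// kernel_step1 ensures by quicksorting every adjacency list.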
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = 0;
}
}
// sort the outgoing edges according to their ID order
__global__
void kernel_step1(uint32_t * vplist, cudaGraph graph)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
uint64_t start;
start = graph.get_firstedge_index(tid);
quicksort(graph.get_edge_ptr(start), graph.get_vertex_degree(tid));
}
// get neighbour set intersections of the vertices with edge links
__global__
void kernel_step2(uint32_t * vplist, cudaGraph graph)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
uint64_t start, end;
start = graph.get_firstedge_index(tid);
end = start + graph.get_vertex_degree(tid);
for (uint64_t i=start; i<end; i++)
{
uint64_t dest = graph.get_edge_dest(i);
if (tid > dest) continue; // skip reverse edges
uint64_t dest_start = graph.get_firstedge_index(dest);
unsigned cnt = get_intersect_cnt(graph.get_edge_ptr(start),
graph.get_vertex_degree(tid), graph.get_edge_ptr(dest_start),
graph.get_vertex_degree(dest));
atomicAdd(&(vplist[tid]), cnt);
atomicAdd(&(vplist[dest]), cnt);
}
}
// reduction to get the total count
__global__
void kernel_step3(uint32_t * vplist, cudaGraph graph, unsigned * d_tcount)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
vplist[tid] = vplist[tid] / 2;
atomicAdd(d_tcount, vplist[tid]);
}
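// Counting note: kernel_step2 visits each undirected edge (u, v) once
// (the tid > dest test skips the reverse direction) and adds the
// neighbour-set intersection size to both endpoints, so every triangle
// contributes 2 to each of its three vertices. Halving vplist therefore
// gives per-vertex triangle counts, their sum counts each triangle three
// times, and the host divides the reduced total by 3.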
unsigned cuda_triangle_count(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt)
{
unsigned tcount = 0;
uint32_t * device_vpl = 0;
unsigned * device_tcount = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_tcount, sizeof(unsigned)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// initialization
hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
unsigned zeronum = 0;
cudaErrCheck( hipMemcpy(device_tcount, &zeronum, sizeof(unsigned),
hipMemcpyHostToDevice) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( kernel_step1), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph);
hipLaunchKernelGGL(( kernel_step2), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph);
hipLaunchKernelGGL(( kernel_step3), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph, device_tcount);
cudaErrCheck( hipMemcpy(&tcount, device_tcount, sizeof(unsigned), hipMemcpyDeviceToHost) );
tcount /= 3;
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
#ifndef ENABLE_VERIFY
printf("%f,", h2d_copy_time / 1000);
printf("%f,", d2h_copy_time / 1000);
printf("%f,", kernel_time / 1000);
printf("%f,", (h2d_copy_time + d2h_copy_time + kernel_time) / 1000);
// printf("== host->device copy time: %f ms\n", h2d_copy_time);
// printf("== device->host copy time: %f ms\n", d2h_copy_time);
// printf("== kernel time: %f ms\n", kernel_time);
#endif
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( hipFree(device_vpl) );
cudaErrCheck( hipFree(device_tcount) );
return tcount;
}
| ecfc07e2d567026a5083f515d72384064aabafe3.cu | //=================================================================//
// CUDA BFS kernel
// Topological-Driven: one node per thread, thread_centric,
// use atomicAdd instruction
//
// Reference:
// T. Schank. Algorithmic Aspects of Triangle-Based Network Analysis
//=================================================================//
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define STACK_SZ 2048
#define SEED 123
__device__ void quicksort(uint64_t array[], unsigned len)
{
uint64_t left = 0, stack[STACK_SZ], pos = 0, seed = SEED;
for ( ; ; ) /* outer loop */
{
for (; left+1 < len; len++) /* sort left to len-1 */
{
if (pos == STACK_SZ) len = stack[pos = 0]; /* stack overflow, reset */
uint64_t pivot = array[left+seed%(len-left)]; /* pick random pivot */
seed = seed*69069+1; /* next pseudorandom number */
stack[pos++] = len; /* sort right part later */
for (unsigned right = left-1; ; ) /* inner loop: partitioning */
{
while (array[++right] < pivot); /* look for greater element */
while (pivot < array[--len]); /* look for smaller element */
if (right >= len) break; /* partition point found? */
uint64_t temp = array[right];
array[right] = array[len]; /* the only swap */
array[len] = temp;
} /* partitioned, continue left part */
}
if (pos == 0) break; /* stack empty? */
left = len; /* left to right is sorted */
len = stack[--pos]; /* get next range to sort */
}
}
__device__ unsigned get_intersect_cnt(uint64_t* setA, unsigned sizeA, uint64_t* setB, unsigned sizeB)
{
unsigned ret=0;
unsigned iter1=0, iter2=0;
while (iter1<sizeA && iter2<sizeB)
{
if (setA[iter1] < setB[iter2])
iter1++;
else if (setA[iter1] > setB[iter2])
iter2++;
else
{
ret++;
iter1++;
iter2++;
}
}
return ret;
}
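// Example: for sorted neighbour lists setA = {1, 3, 5} and setB = {3, 4, 5}
// the merge above scans each list once and returns 2 (common elements 3
// and 5). Both inputs must already be sorted ascending, which is what
// kernel_step1 ensures by quicksorting every adjacency list.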
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = 0;
}
}
// sort the outgoing edges according to their ID order
__global__
void kernel_step1(uint32_t * vplist, cudaGraph graph)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
uint64_t start;
start = graph.get_firstedge_index(tid);
quicksort(graph.get_edge_ptr(start), graph.get_vertex_degree(tid));
}
// get neighbour set intersections of the vertices with edge links
__global__
void kernel_step2(uint32_t * vplist, cudaGraph graph)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
uint64_t start, end;
start = graph.get_firstedge_index(tid);
end = start + graph.get_vertex_degree(tid);
for (uint64_t i=start; i<end; i++)
{
uint64_t dest = graph.get_edge_dest(i);
if (tid > dest) continue; // skip reverse edges
uint64_t dest_start = graph.get_firstedge_index(dest);
unsigned cnt = get_intersect_cnt(graph.get_edge_ptr(start),
graph.get_vertex_degree(tid), graph.get_edge_ptr(dest_start),
graph.get_vertex_degree(dest));
atomicAdd(&(vplist[tid]), cnt);
atomicAdd(&(vplist[dest]), cnt);
}
}
// reduction to get the total count
__global__
void kernel_step3(uint32_t * vplist, cudaGraph graph, unsigned * d_tcount)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
vplist[tid] = vplist[tid] / 2;
atomicAdd(d_tcount, vplist[tid]);
}
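// Counting note: kernel_step2 visits each undirected edge (u, v) once
// (the tid > dest test skips the reverse direction) and adds the
// neighbour-set intersection size to both endpoints, so every triangle
// contributes 2 to each of its three vertices. Halving vplist therefore
// gives per-vertex triangle counts, their sum counts each triangle three
// times, and the host divides the reduced total by 3.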
unsigned cuda_triangle_count(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt)
{
unsigned tcount = 0;
uint32_t * device_vpl = 0;
unsigned * device_tcount = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
cudaGetDevice(&device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_tcount, sizeof(unsigned)) );
cudaEvent_t start_event, stop_event;
cudaErrCheck( cudaEventCreate(&start_event) );
cudaErrCheck( cudaEventCreate(&stop_event) );
// initialization
initialize<<<num_block, num_thread_per_block>>>(device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// memcpy from host to device
cudaEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
unsigned zeronum = 0;
cudaErrCheck( cudaMemcpy(device_tcount, &zeronum, sizeof(unsigned),
cudaMemcpyHostToDevice) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
kernel_step1<<<num_block, num_thread_per_block>>>(device_vpl, d_graph);
kernel_step2<<<num_block, num_thread_per_block>>>(device_vpl, d_graph);
kernel_step3<<<num_block, num_thread_per_block>>>(device_vpl, d_graph, device_tcount);
cudaErrCheck( cudaMemcpy(&tcount, device_tcount, sizeof(unsigned), cudaMemcpyDeviceToHost) );
tcount /= 3;
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&kernel_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
#ifndef ENABLE_VERIFY
printf("%f,", h2d_copy_time / 1000);
printf("%f,", d2h_copy_time / 1000);
printf("%f,", kernel_time / 1000);
printf("%f,", (h2d_copy_time + d2h_copy_time + kernel_time) / 1000);
// printf("== host->device copy time: %f ms\n", h2d_copy_time);
// printf("== device->host copy time: %f ms\n", d2h_copy_time);
// printf("== kernel time: %f ms\n", kernel_time);
#endif
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( cudaFree(device_vpl) );
cudaErrCheck( cudaFree(device_tcount) );
return tcount;
}
|
5c6fdcc7696b1dd4f83864ce2d4f5f3ced402c07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Student: Catalin Consantin Usurelu
// Group: 333CA
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <helper_functions.h>
#include <helper_math.h>
// includes, project
#include "2Dconvolution.h"
#define OFFSET KERNEL_SIZE / 2
#define ALLIGN_MID KERNEL_SIZE / 2
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(int width, int height);
Matrix AllocateMatrix(int width, int height);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P);
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.pitch + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.elements[row * A.pitch + col] = value;
}
////////////////////////////////////////////////////////////////////////////////
// Convolution without shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
// Each thread computes one element of P
// by accumulating results into Pvalue
float Pvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Outside of range
if(row >= N.height || col >= N.width)
{
return;
}
// check the start and end values of m and n to prevent overrunning the
// matrix edges
unsigned int mbegin = (row < 2)? 2 - row : 0;
unsigned int mend = (row > (N.height - 3))?
N.height - row + 2 : 5;
unsigned int nbegin = (col < 2)? 2 - col : 0;
unsigned int nend = (col > (N.width - 3))?
(N.width - col) + 2 : 5;
// overlay A over B centered at element (i,j). For each
// overlapping element, multiply the two and accumulate
for(unsigned int m = mbegin; m < mend; m++)
{
for(unsigned int n = nbegin; n < nend; n++)
{
Pvalue += M.elements[m * 5 + n] *
N.elements[N.width * (row + m - 2) + (col + n - 2)];
}
}
// store the result
P.elements[row * N.width + col] = (float)Pvalue;
}
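// Summary: each thread computes one output pixel by overlaying the 5x5
// kernel M on N centred at (row, col); mbegin/mend and nbegin/nend clamp
// the loops so that kernel taps falling outside N near the borders are
// skipped, i.e. treated as contributing zero.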
////////////////////////////////////////////////////////////////////////////////
// Convolution with shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernelShared(Matrix M, Matrix N, Matrix P)
{
float Pvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float Ms[KERNEL_SIZE * KERNEL_SIZE];
__shared__ float Ns[BLOCK_SIZE + KERNEL_SIZE - 1][BLOCK_SIZE + KERNEL_SIZE - 1];
// The first 5x5 block of threads copies the kernel matrix into shared memory
if(threadIdx.x < 5 && threadIdx.y < 5)
Ms[threadIdx.y * KERNEL_SIZE + threadIdx.x] = M.elements[threadIdx.y * KERNEL_SIZE + threadIdx.x];
__syncthreads();
// ns_row, ns_col are in fact indices that position the thread block
// relative to the top-left corner of Ns (the shared copy of N)
// the matrix is centred in the middle (i.e. shifted by 2) - in practice
// it initially overlaps the block for which we compute values
int ns_row = threadIdx.y + ALLIGN_MID;
int ns_col = threadIdx.x + ALLIGN_MID;
// Code explanation in the Readme
// Here we just use the "thread block" (or parts of it) to
// copy pieces of N into Ns
if(row - OFFSET >= 0 && col - OFFSET >= 0)
Ns[ns_row - OFFSET][ns_col - OFFSET] = GetElement(N, row - OFFSET, col - OFFSET);
if(threadIdx.y >= BLOCK_SIZE - 4)
{
if(row + OFFSET < N.height && col - OFFSET >= 0)
Ns[ns_row + OFFSET][ns_col - OFFSET] = GetElement(N, row + OFFSET, col - OFFSET);
if(threadIdx.x >= BLOCK_SIZE - 4)
{
if(row + OFFSET < N.height && col + OFFSET < N.width)
Ns[ns_row + OFFSET][ns_col + OFFSET] = GetElement(N, row + OFFSET, col + OFFSET);
}
}
if(threadIdx.x >= BLOCK_SIZE - 4)
if(row - OFFSET >= 0 && col + OFFSET < N.width)
Ns[ns_row - OFFSET][ns_col + OFFSET] = GetElement(N, row - OFFSET, col + OFFSET);
// We needed all the threads for the part above (easier to implement)
if(row >= N.height || col >= N.width)
{
return;
}
// Wait for the copy into Ns to finish
__syncthreads();
// We only care about the "middle" of Ns, i.e. without the borders
// => we start with an offset of 2 from the top-left corner
// The rest of the code is the same as the non-shared / serial version,
// except that it reads from the shared matrices
int NsRow = threadIdx.y + KERNEL_SIZE / 2;
int NsCol = threadIdx.x + KERNEL_SIZE / 2;
// check the start and end values of m and n to prevent overrunning the
// matrix edges
unsigned int mbegin = (row < 2)? 2 - row : 0;
unsigned int mend = (row > (N.height - 3))?
N.height - row + 2 : 5;
unsigned int nbegin = (col < 2)? 2 - col : 0;
unsigned int nend = (col > (N.width - 3))?
(N.width - col) + 2 : 5;
// overlay A over B centered at element (i,j). For each
// overlapping element, multiply the two and accumulate
for(unsigned int m = mbegin; m < mend; m++)
{
for(unsigned int n = nbegin; n < nend; n++)
{
Pvalue += Ms[m * 5 + n] *
Ns[NsRow + m - 2][NsCol + n - 2];
}
}
// store the result
P.elements[row * N.width + col] = (float)Pvalue;
}
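// Tile layout: Ns is a (BLOCK_SIZE + 4) x (BLOCK_SIZE + 4) tile of N,
// i.e. the BLOCK_SIZE x BLOCK_SIZE output block plus a 2-element halo on
// every side (OFFSET = KERNEL_SIZE / 2 = 2). The unconditional load fills
// rows/columns 0 .. BLOCK_SIZE-1 of Ns (each thread reads the element
// shifted up-left by OFFSET, subject to the image-boundary guards), while
// threads in the last four rows/columns of the block fill the remaining
// bottom/right part of the tile, so after the second __syncthreads() the
// whole neighbourhood needed by the block is resident in shared memory.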
////////////////////////////////////////////////////////////////////////////////
// Returns 1 if the matrices are ~ equal
////////////////////////////////////////////////////////////////////////////////
int CompareMatrices(Matrix A, Matrix B)
{
int i;
if(A.width != B.width || A.height != B.height || A.pitch != B.pitch)
return 0;
int size = A.width * A.height;
for(i = 0; i < size; i++)
if(fabs(A.elements[i] - B.elements[i]) > MAX_ERR)
return 0;
return 1;
}
void GenerateRandomMatrix(Matrix m)
{
int i;
int size = m.width * m.height;
srand(time(NULL));
for(i = 0; i < size; i++)
m.elements[i] = rand() / (float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int width = 0, height = 0;
FILE *f, *out;
if(argc < 2)
{
printf("Argumente prea puine, trimitei id-ul testului care trebuie rulat\n");
return 0;
}
char name[100];
sprintf(name, "./tests/test_%s.txt", argv[1]);
f = fopen(name, "r");
out = fopen("out.txt", "a");
fscanf(f, "%d%d", &width, &height);
Matrix M; // kernel on the host
Matrix N; // initial matrix on the host
Matrix P; // result computed on the GPU without shared memory
Matrix PS; // result computed on the GPU with shared memory
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE);
N = AllocateMatrix(width, height);
P = AllocateMatrix(width, height);
PS = AllocateMatrix(width, height);
GenerateRandomMatrix(M);
GenerateRandomMatrix(N);
printf("Test for matrix size %dX%d = %d\n", height, width, height * width);
// M * N on the device
ConvolutionOnDevice(M, N, P);
// M * N on the device with shared memory
ConvolutionOnDeviceShared(M, N, PS);
// for measuring the execution time on the CPU
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
// compute the result on the CPU for comparison
Matrix reference = AllocateMatrix(P.width, P.height);
sdkStartTimer(&kernelTime);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
sdkStopTimer(&kernelTime);
printf ("Timp execuie CPU: %f ms\n", sdkGetTimerValue(&kernelTime));
// verific dac rezultatul obinut pe device este cel ateptat
int res = CompareMatrices(reference, P);
printf("Test global %s\n", (1 == res) ? "PASSED" : "FAILED");
fprintf(out, "Test global %s %s\n", argv[1], (1 == res) ? "PASSED" : "FAILED");
// check that the shared-memory device result is the expected one
// int ress = CompareMatrices(reference, PS);
int ress = CompareMatrices(reference, PS);
printf("Test shared %s\n", (1 == ress) ? "PASSED" : "FAILED");
fprintf(out, "Test shared %s %s\n", argv[1], (1 == ress) ? "PASSED" : "FAILED");
printf("\n");
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
FreeMatrix(&PS);
fclose(f);
fclose(out);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
Matrix Md, Nd, Pd; //the corresponding matrices on the device
size_t size;
//for measuring the kernel execution time
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
Md = AllocateDeviceMatrix(M.width, M.height);
Nd = AllocateDeviceMatrix(N.width, N.height);
Pd = AllocateDeviceMatrix(P.width, P.height);
//kernel (convolution mask) matrix
size = M.width * M.height * sizeof(float);
hipMemcpy(Md.elements, M.elements, size, hipMemcpyHostToDevice);
//input matrix from the host
size = N.width * N.height * sizeof(float);
hipMemcpy( Nd.elements, N.elements, size, hipMemcpyHostToDevice);
// dimGrid: if a dimension does not divide evenly, take the ceil() of the result
// so that every element is covered
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N.width + dimBlock.x - 1) / dimBlock.x, (N.height + dimBlock.y - 1) / dimBlock.y);
sdkStartTimer(&kernelTime);
hipLaunchKernelGGL(( ConvolutionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd);
hipDeviceSynchronize();
sdkStopTimer(&kernelTime);
printf ("Timp execuie kernel: %f ms\n", sdkGetTimerValue(&kernelTime));
size = P.width * P.height * sizeof(float);
hipMemcpy(P.elements, Pd.elements, size, hipMemcpyDeviceToHost);
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P)
{
Matrix Md, Nd, Pd; //the corresponding matrices on the device
size_t size;
//for measuring the kernel execution time
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
Md = AllocateDeviceMatrix(M.width, M.height);
Nd = AllocateDeviceMatrix(N.width, N.height);
Pd = AllocateDeviceMatrix(P.width, P.height);
//kernel (convolution mask) matrix
size = M.width * M.height * sizeof(float);
hipMemcpy(Md.elements, M.elements, size, hipMemcpyHostToDevice);
//input matrix from the host
size = N.width * N.height * sizeof(float);
hipMemcpy( Nd.elements, N.elements, size, hipMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N.width + dimBlock.x - 1) / dimBlock.x, (N.height + dimBlock.y - 1) / dimBlock.y);
sdkStartTimer(&kernelTime);
hipLaunchKernelGGL(( ConvolutionKernelShared), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd);
hipDeviceSynchronize();
sdkStopTimer(&kernelTime);
printf ("Timp execuie kernel cu memorie partajat: %f ms\n", sdkGetTimerValue(&kernelTime));
size = P.width * P.height * sizeof(float);
hipMemcpy(P.elements, Pd.elements, size, hipMemcpyDeviceToHost);
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocates a height*width matrix on the device
Matrix AllocateDeviceMatrix(int width, int height)
{
Matrix m;
m.width = width;
m.height = height;
m.pitch = width;
size_t size = m.width * m.height * sizeof(float);
hipMalloc( (void**) &(m.elements), size);
return m;
}
// Allocates a height*width matrix on the host
Matrix AllocateMatrix(int width, int height)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = (float*) malloc(size*sizeof(float));
return M;
}
// Frees a matrix on the device
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Frees a matrix on the host
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
| 5c6fdcc7696b1dd4f83864ce2d4f5f3ced402c07.cu | // Student: Catalin Consantin Usurelu
// Grupa: 333CA
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <helper_functions.h>
#include <helper_math.h>
// includes, project
#include "2Dconvolution.h"
#define OFFSET KERNEL_SIZE / 2
#define ALLIGN_MID KERNEL_SIZE / 2
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(int width, int height);
Matrix AllocateMatrix(int width, int height);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P);
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.pitch + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.elements[row * A.pitch + col] = value;
}
////////////////////////////////////////////////////////////////////////////////
// Multiplication (convolution) without shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
// Each thread computes one element of P
// by accumulating results into Pvalue
float Pvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Outside of range
if(row >= N.height || col >= N.width)
{
return;
}
// check the start and end values of m and n to prevent overrunning the
// matrix edges
unsigned int mbegin = (row < 2)? 2 - row : 0;
unsigned int mend = (row > (N.height - 3))?
N.height - row + 2 : 5;
unsigned int nbegin = (col < 2)? 2 - col : 0;
unsigned int nend = (col > (N.width - 3))?
(N.width - col) + 2 : 5;
// overlay A over B centered at element (i,j). For each
// overlapping element, multiply the two and accumulate
for(unsigned int m = mbegin; m < mend; m++)
{
for(unsigned int n = nbegin; n < nend; n++)
{
Pvalue += M.elements[m * 5 + n] *
N.elements[N.width * (row + m - 2) + (col + n - 2)];
}
}
// store the result
P.elements[row * N.width + col] = (float)Pvalue;
}
////////////////////////////////////////////////////////////////////////////////
// Multiplication (convolution) with shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernelShared(Matrix M, Matrix N, Matrix P)
{
float Pvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float Ms[KERNEL_SIZE * KERNEL_SIZE];
__shared__ float Ns[BLOCK_SIZE + KERNEL_SIZE - 1][BLOCK_SIZE + KERNEL_SIZE - 1];
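// Ns caches the BLOCK_SIZE x BLOCK_SIZE tile of N plus a halo of
// KERNEL_SIZE/2 = 2 cells on every side (hence BLOCK_SIZE + KERNEL_SIZE - 1
// per dimension), so every thread can read its whole 5x5 neighbourhood from
// shared memory instead of global memory.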
// The first 5x5 block of threads copies the kernel matrix into shared memory
if(threadIdx.x < 5 && threadIdx.y < 5)
Ms[threadIdx.y * KERNEL_SIZE + threadIdx.x] = M.elements[threadIdx.y * KERNEL_SIZE + threadIdx.x];
__syncthreads();
// ns_row, ns_col are really indices that position this thread block relative
// to the top-left corner of Ns (the shared copy of N)
// the tile is centred in the middle (i.e. shifted by 2) - initially it
// overlaps the block for which we are computing values
int ns_row = threadIdx.y + ALLIGN_MID;
int ns_col = threadIdx.x + ALLIGN_MID;
// The code is explained in the Readme
// Here we just use the thread block (or parts of it) to copy
// pieces of N into Ns
if(row - OFFSET >= 0 && col - OFFSET >= 0)
Ns[ns_row - OFFSET][ns_col - OFFSET] = GetElement(N, row - OFFSET, col - OFFSET);
if(threadIdx.y >= BLOCK_SIZE - 4)
{
if(row + OFFSET < N.height && col - OFFSET >= 0)
Ns[ns_row + OFFSET][ns_col - OFFSET] = GetElement(N, row + OFFSET, col - OFFSET);
if(threadIdx.x >= BLOCK_SIZE - 4)
{
if(row + OFFSET < N.height && col + OFFSET < N.width)
Ns[ns_row + OFFSET][ns_col + OFFSET] = GetElement(N, row + OFFSET, col + OFFSET);
}
}
if(threadIdx.x >= BLOCK_SIZE - 4)
if(row - OFFSET >= 0 && col + OFFSET < N.width)
Ns[ns_row - OFFSET][ns_col + OFFSET] = GetElement(N, row - OFFSET, col + OFFSET);
// We needed all of the threads for the part above (easier to implement that way),
// so the out-of-range check is only done now.
// Wait for the copy into Ns to finish; the barrier must be reached by every
// thread of the block, so it comes before the early return below.
__syncthreads();
if(row >= N.height || col >= N.width)
{
return;
}
// We only care about the "middle" of Ns, i.e. without the borders
// => we start with an offset of 2 from the top-left corner
// The rest of the code is the same as the non-shared / serial version,
// except that we read from the shared matrices
int NsRow = threadIdx.y + KERNEL_SIZE / 2;
int NsCol = threadIdx.x + KERNEL_SIZE / 2;
// check the start and end values of m and n to prevent overrunning the
// matrix edges
unsigned int mbegin = (row < 2)? 2 - row : 0;
unsigned int mend = (row > (N.height - 3))?
N.height - row + 2 : 5;
unsigned int nbegin = (col < 2)? 2 - col : 0;
unsigned int nend = (col > (N.width - 3))?
(N.width - col) + 2 : 5;
// overlay A over B centered at element (i,j). For each
// overlapping element, multiply the two and accumulate
for(unsigned int m = mbegin; m < mend; m++)
{
for(unsigned int n = nbegin; n < nend; n++)
{
Pvalue += Ms[m * 5 + n] *
Ns[NsRow + m - 2][NsCol + n - 2];
}
}
// store the result
P.elements[row * N.width + col] = (float)Pvalue;
}
////////////////////////////////////////////////////////////////////////////////
// Returns 1 if the matrices are approximately equal
////////////////////////////////////////////////////////////////////////////////
int CompareMatrices(Matrix A, Matrix B)
{
int i;
if(A.width != B.width || A.height != B.height || A.pitch != B.pitch)
return 0;
int size = A.width * A.height;
for(i = 0; i < size; i++)
if(fabs(A.elements[i] - B.elements[i]) > MAX_ERR)
return 0;
return 1;
}
void GenerateRandomMatrix(Matrix m)
{
int i;
int size = m.width * m.height;
srand(time(NULL));
for(i = 0; i < size; i++)
m.elements[i] = rand() / (float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int width = 0, height = 0;
FILE *f, *out;
if(argc < 2)
{
printf("Argumente prea puține, trimiteți id-ul testului care trebuie rulat\n");
return 0;
}
char name[100];
sprintf(name, "./tests/test_%s.txt", argv[1]);
f = fopen(name, "r");
out = fopen("out.txt", "a");
fscanf(f, "%d%d", &width, &height);
Matrix M;//convolution kernel on the host
Matrix N;//input matrix on the host
Matrix P;//GPU result computed without shared memory
Matrix PS;//GPU result computed with shared memory
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE);
N = AllocateMatrix(width, height);
P = AllocateMatrix(width, height);
PS = AllocateMatrix(width, height);
GenerateRandomMatrix(M);
GenerateRandomMatrix(N);
printf("Test for matrix size %dX%d = %d\n", height, width, height * width);
// M * N on the device
ConvolutionOnDevice(M, N, P);
// M * N on the device, with shared memory
ConvolutionOnDeviceShared(M, N, PS);
//for measuring the execution time on the CPU
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
// compute the reference result on the CPU for comparison
Matrix reference = AllocateMatrix(P.width, P.height);
sdkStartTimer(&kernelTime);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
sdkStopTimer(&kernelTime);
printf ("Timp execuție CPU: %f ms\n", sdkGetTimerValue(&kernelTime));
// verifică dacă rezultatul obținut pe device este cel așteptat
int res = CompareMatrices(reference, P);
printf("Test global %s\n", (1 == res) ? "PASSED" : "FAILED");
fprintf(out, "Test global %s %s\n", argv[1], (1 == res) ? "PASSED" : "FAILED");
// check that the shared-memory device result is the expected one
// int ress = CompareMatrices(reference, PS);
int ress = CompareMatrices(reference, PS);
printf("Test shared %s\n", (1 == ress) ? "PASSED" : "FAILED");
fprintf(out, "Test shared %s %s\n", argv[1], (1 == ress) ? "PASSED" : "FAILED");
printf("\n");
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
FreeMatrix(&PS);
fclose(f);
fclose(out);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
Matrix Md, Nd, Pd; //the corresponding matrices on the device
size_t size;
//for measuring the kernel execution time
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
Md = AllocateDeviceMatrix(M.width, M.height);
Nd = AllocateDeviceMatrix(N.width, N.height);
Pd = AllocateDeviceMatrix(P.width, P.height);
//kernel (convolution mask) matrix
size = M.width * M.height * sizeof(float);
cudaMemcpy(Md.elements, M.elements, size, cudaMemcpyHostToDevice);
//input matrix from the host
size = N.width * N.height * sizeof(float);
cudaMemcpy( Nd.elements, N.elements, size, cudaMemcpyHostToDevice);
// dimGrid: if a dimension does not divide evenly, take the ceil() of the result
// so that every element is covered
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N.width + dimBlock.x - 1) / dimBlock.x, (N.height + dimBlock.y - 1) / dimBlock.y);
sdkStartTimer(&kernelTime);
ConvolutionKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
cudaThreadSynchronize();
sdkStopTimer(&kernelTime);
printf ("Timp execuție kernel: %f ms\n", sdkGetTimerValue(&kernelTime));
size = P.width * P.height * sizeof(float);
cudaMemcpy(P.elements, Pd.elements, size, cudaMemcpyDeviceToHost);
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P)
{
Matrix Md, Nd, Pd; //the corresponding matrices on the device
size_t size;
//for measuring the kernel execution time
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
Md = AllocateDeviceMatrix(M.width, M.height);
Nd = AllocateDeviceMatrix(N.width, N.height);
Pd = AllocateDeviceMatrix(P.width, P.height);
//kernel (convolution mask) matrix
size = M.width * M.height * sizeof(float);
cudaMemcpy(Md.elements, M.elements, size, cudaMemcpyHostToDevice);
//input matrix from the host
size = N.width * N.height * sizeof(float);
cudaMemcpy( Nd.elements, N.elements, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N.width + dimBlock.x - 1) / dimBlock.x, (N.height + dimBlock.y - 1) / dimBlock.y);
sdkStartTimer(&kernelTime);
ConvolutionKernelShared<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
cudaThreadSynchronize();
sdkStopTimer(&kernelTime);
printf ("Timp execuție kernel cu memorie partajată: %f ms\n", sdkGetTimerValue(&kernelTime));
size = P.width * P.height * sizeof(float);
cudaMemcpy(P.elements, Pd.elements, size, cudaMemcpyDeviceToHost);
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocates a height*width matrix on the device
Matrix AllocateDeviceMatrix(int width, int height)
{
Matrix m;
m.width = width;
m.height = height;
m.pitch = width;
size_t size = m.width * m.height * sizeof(float);
cudaMalloc( (void**) &(m.elements), size);
return m;
}
// Allocates a height*width matrix on the host
Matrix AllocateMatrix(int width, int height)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = (float*) malloc(size*sizeof(float));
return M;
}
// Frees a matrix on the device
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Frees a matrix on the host
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
|
5bd11317328aca4993f169dd0fcc0edc503513d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "matrixUtility.h"
#include <iostream>
EXPORT void testPrintMatrix() {
std::cout << " testing matrix operations" << std::endl;
}
//assumes the matrix is already on the GPU
EXPORT LweSample_16* matrixToVector(LweSample_16 ***matrix, int row, int col, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk){
std::cout << "testing matrix to vector" << std::endl;
const int n = bk->bk->in_out_params->n;
int vLength = row * col;
int totalBitSize = vLength * bitSize;
LweSample_16 *vector = convertBitToNumberZero_GPU(totalBitSize, bk);
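// Note: only the `a` coefficients are device memory here (copied below with
// hipMemcpyDeviceToDevice); `b` and `current_variance` are moved with plain
// memcpy, so those fields appear to stay host-resident.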
for (int i = 0; i < row; ++i) {
for (int j = 0; j < col; ++j) {
int index = i * col + j;
int sI = index * bitSize;
hipMemcpy(vector->a + sI * n, matrix[i][j]->a, bitSize * n * sizeof(int), hipMemcpyDeviceToDevice);
memcpy(vector->b + sI, matrix[i][j]->b, bitSize * sizeof(int));
memcpy(vector->current_variance + sI, matrix[i][j]->current_variance, bitSize * sizeof(double));
}
}
return vector;
}
//assumes the matrix is already on the GPU
EXPORT LweSample_16*** vectorToMatrix(LweSample_16 *vector, int row, int col, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk){
std::cout << "testing vector to matrix" << std::endl;
const int n = bk->bk->in_out_params->n;
LweSample_16 ***matrix = new LweSample_16**[row];
for (int i = 0; i < row; ++i) {
matrix[i] = new LweSample_16*[col];
for (int j = 0; j < col; ++j) {
matrix[i][j] = convertBitToNumberZero_GPU(bitSize, bk);
int index = i * col + j;
int sI = index * bitSize;
hipMemcpy(matrix[i][j]->a, vector->a + sI * n, bitSize * n * sizeof(int), hipMemcpyDeviceToDevice);
memcpy(matrix[i][j]->b, vector->b + sI, bitSize * sizeof(int));
memcpy(matrix[i][j]->current_variance, vector->current_variance + sI, bitSize * sizeof(double));
}
}
return matrix;
}
//copies the matrix to the GPU
EXPORT LweSample_16*** matrixToDevice(LweSample_16 ***h_matrix, int row, int col, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk) {
std::cout << "taking matrix to device" << std::endl;
const int n = bk->bk->in_out_params->n;
LweSample_16 ***d_matrix = new LweSample_16**[row];
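// Only the `a` coefficients are uploaded with hipMemcpyHostToDevice; `b` and
// `current_variance` are copied with plain memcpy, so they presumably stay on
// the host, matching the split noted in matrixToVector above.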
for (int i = 0; i < row; ++i) {
d_matrix[i] = new LweSample_16*[col];
for (int j = 0; j < col; ++j) {
d_matrix[i][j] = convertBitToNumberZero_GPU(bitSize, bk);
hipMemcpy(d_matrix[i][j]->a, h_matrix[i][j]->a, bitSize * n * sizeof(int), hipMemcpyHostToDevice);
memcpy(d_matrix[i][j]->b, h_matrix[i][j]->b, bitSize * sizeof(int));
memcpy(d_matrix[i][j]->current_variance, h_matrix[i][j]->current_variance, bitSize * sizeof(double));
}
}
return d_matrix;
}
//prepare left matrix to multiply and create LweSample array (vector)
EXPORT LweSample_16** matMul_prepareLeftMat(LweSample_16 ***matrix, int row, int col, int nDuplication, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk) {
std::cout << "Preparing left matrix to multiply" << std::endl;
int vLen = row * col * nDuplication;
LweSample_16 **vector = new LweSample_16*[vLen];
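// Only pointers are stored here (no ciphertext is copied): row i of the matrix
// is written out nDuplication times before moving on to row i+1. Example for
// row = 2, col = 3, nDuplication = 2: the output holds row0, row0, row1, row1
// (each entry being that row's col pointers in order). Presumably this lines
// the left operand up element-by-element with the vector produced by
// matMul_prepareRightMat below.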
for (int i = 0; i < row; ++i) {
for (int k = 0; k < nDuplication; ++k) {
for (int j = 0; j < col; ++j) {
int index = i * nDuplication * col + k * col + j;
vector[index] = matrix[i][j];
}
}
}
return vector;
}
//prepare right matrix to multiply and create LweSample array (vector)
EXPORT LweSample_16** matMul_prepareRightMat(LweSample_16 ***matrix, int row, int col, int nDuplication, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk) {
std::cout << "Preparing right matrix to multiply" << std::endl;
int vLen = row * col * nDuplication;
LweSample_16 **vector = new LweSample_16*[vLen];
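// Only pointers are stored here as well: each of the nDuplication passes walks
// the matrix column-by-column (matrix[j][i]), i.e. writes out the transpose,
// and that transposed layout is repeated nDuplication times.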
for (int k = 0; k < nDuplication; ++k) {
for (int i = 0; i < col; ++i) {
for (int j = 0; j < row; ++j) {
int index = k * col * row + i * row + j;
vector[index] = matrix[j][i];
}
}
}
return vector;
}
| 5bd11317328aca4993f169dd0fcc0edc503513d5.cu | #include "matrixUtility.h"
#include <iostream>
EXPORT void testPrintMatrix() {
std::cout << " testing matrix operations" << std::endl;
}
//assumes the matrix is already on the GPU
EXPORT LweSample_16* matrixToVector(LweSample_16 ***matrix, int row, int col, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk){
std::cout << "testing matrix to vector" << std::endl;
const int n = bk->bk->in_out_params->n;
int vLength = row * col;
int totalBitSize = vLength * bitSize;
LweSample_16 *vector = convertBitToNumberZero_GPU(totalBitSize, bk);
for (int i = 0; i < row; ++i) {
for (int j = 0; j < col; ++j) {
int index = i * col + j;
int sI = index * bitSize;
cudaMemcpy(vector->a + sI * n, matrix[i][j]->a, bitSize * n * sizeof(int), cudaMemcpyDeviceToDevice);
memcpy(vector->b + sI, matrix[i][j]->b, bitSize * sizeof(int));
memcpy(vector->current_variance + sI, matrix[i][j]->current_variance, bitSize * sizeof(double));
}
}
return vector;
}
//assumes the matrix is already on the GPU
EXPORT LweSample_16*** vectorToMatrix(LweSample_16 *vector, int row, int col, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk){
std::cout << "testing vector to matrix" << std::endl;
const int n = bk->bk->in_out_params->n;
LweSample_16 ***matrix = new LweSample_16**[row];
for (int i = 0; i < row; ++i) {
matrix[i] = new LweSample_16*[col];
for (int j = 0; j < col; ++j) {
matrix[i][j] = convertBitToNumberZero_GPU(bitSize, bk);
int index = i * col + j;
int sI = index * bitSize;
cudaMemcpy(matrix[i][j]->a, vector->a + sI * n, bitSize * n * sizeof(int), cudaMemcpyDeviceToDevice);
memcpy(matrix[i][j]->b, vector->b + sI, bitSize * sizeof(int));
memcpy(matrix[i][j]->current_variance, vector->current_variance + sI, bitSize * sizeof(double));
}
}
return matrix;
}
//copies the matrix to the GPU
EXPORT LweSample_16*** matrixToDevice(LweSample_16 ***h_matrix, int row, int col, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk) {
std::cout << "taking matrix to device" << std::endl;
const int n = bk->bk->in_out_params->n;
LweSample_16 ***d_matrix = new LweSample_16**[row];
for (int i = 0; i < row; ++i) {
d_matrix[i] = new LweSample_16*[col];
for (int j = 0; j < col; ++j) {
d_matrix[i][j] = convertBitToNumberZero_GPU(bitSize, bk);
cudaMemcpy(d_matrix[i][j]->a, h_matrix[i][j]->a, bitSize * n * sizeof(int), cudaMemcpyHostToDevice);
memcpy(d_matrix[i][j]->b, h_matrix[i][j]->b, bitSize * sizeof(int));
memcpy(d_matrix[i][j]->current_variance, h_matrix[i][j]->current_variance, bitSize * sizeof(double));
}
}
return d_matrix;
}
//prepare left matrix to multiply and create LweSample array (vector)
EXPORT LweSample_16** matMul_prepareLeftMat(LweSample_16 ***matrix, int row, int col, int nDuplication, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk) {
std::cout << "Preparing left matrix to multiply" << std::endl;
int vLen = row * col * nDuplication;
LweSample_16 **vector = new LweSample_16*[vLen];
for (int i = 0; i < row; ++i) {
for (int k = 0; k < nDuplication; ++k) {
for (int j = 0; j < col; ++j) {
int index = i * nDuplication * col + k * col + j;
vector[index] = matrix[i][j];
}
}
}
return vector;
}
//prepare right matrix to multiply and create LweSample array (vector)
EXPORT LweSample_16** matMul_prepareRightMat(LweSample_16 ***matrix, int row, int col, int nDuplication, int bitSize,
const TFheGateBootstrappingCloudKeySet *bk) {
std::cout << "Preparing right matrix to multiply" << std::endl;
int vLen = row * col * nDuplication;
LweSample_16 **vector = new LweSample_16*[vLen];
for (int k = 0; k < nDuplication; ++k) {
for (int i = 0; i < col; ++i) {
for (int j = 0; j < row; ++j) {
int index = k * col * row + i * row + j;
vector[index] = matrix[j][i];
}
}
}
return vector;
}
|
9584be70e9bc560e4cbe901b5473381483087b29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
namespace {
} // namespace
__global__ void join_add(const int *d1, const int *d2, int *d3) { d3[0] = d1[0] + d2[0]; } | 9584be70e9bc560e4cbe901b5473381483087b29.cu | #include "includes.h"
namespace {
} // namespace
__global__ void join_add(const int *d1, const int *d2, int *d3) { d3[0] = d1[0] + d2[0]; } |
51c4985b3ac51dcc58e6cbb11af48e2e93e7e8e2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <ctime>
#include "device_launch_parameters.h"
#include <limits.h>
#define PRINT_MATRIX true
#define CHECK(value) {\
hipError_t _m_cudaStat = value;\
if (_m_cudaStat != hipSuccess) {\
cout<< "Error:" << hipGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1);\
} }
#define MAX_MEMORY_VECTOR 104857600 //100 Mb
#define COUNT_OF_ELEMENTS_IN_SYSTEM 1024 //Count of elements in system of vectors
#define COUNT_OF_VECTORS_IN_SYSTEM 12 //Count of vectors in system
#define COUNT_OF_ELEMENTS_IN_VECTOR (COUNT_OF_ELEMENTS_IN_SYSTEM / COUNT_OF_VECTORS_IN_SYSTEM) //Count of elements in one vector
#define SIZE_GRAM_MATRIX (COUNT_OF_VECTORS_IN_SYSTEM * COUNT_OF_VECTORS_IN_SYSTEM)
using namespace std;
inline void Info()
{
cout << "Size of system: " << COUNT_OF_ELEMENTS_IN_SYSTEM
<< "\nCount of vectors: " << COUNT_OF_VECTORS_IN_SYSTEM
<< "\nCount of elements in one vector: " << COUNT_OF_ELEMENTS_IN_VECTOR << endl;
}
void InfoResult(unsigned char*, unsigned char*);
void PrintSystemOfVectors(unsigned char*);
void PrintVector(unsigned char*, size_t);
unsigned char* GetRandomSystemOfVectors();
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time);
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time);
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size);
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device);
__global__ void calculate_GramMatrix_GPU(unsigned char* systemOfVectors, unsigned char* gramMatrix)
{
__shared__ unsigned char cache[1024];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= SIZE_GRAM_MATRIX) return;
// cache lives in shared memory and is not zero-initialised, so clear this
// thread's accumulator before summing the dot product (otherwise the result
// depends on whatever was left in shared memory).
cache[threadIdx.x] = 0;
for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
{
cache[threadIdx.x] +=
systemOfVectors[(index / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j] *
systemOfVectors[(index % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j];
}
__syncthreads();
gramMatrix[index] = cache[threadIdx.x];
}
int main()
{
Info();
float timeCPU = 0.0f, timeGPU = 0.0f;
unsigned char* systemOfVectors = GetRandomSystemOfVectors();
bool isForPrint = COUNT_OF_ELEMENTS_IN_SYSTEM <= 2048;
if (isForPrint) PrintSystemOfVectors(systemOfVectors);
cout << "\nSize Gram matrix: " << SIZE_GRAM_MATRIX << "\n\n";
unsigned char* matrixGramCPU = GetGramMatrixCPU(systemOfVectors, timeCPU);
unsigned char* matrixGramGPU = GetGramMatrixGPU(systemOfVectors, timeGPU);
Check(matrixGramCPU, matrixGramGPU);
cout << "\n--------\n";
cout << "Time CPU: " << timeCPU << endl;
cout << "Time GPU: " << timeGPU << endl;
cout << "\n--------\n";
InfoResult(matrixGramCPU, matrixGramGPU);
cin.get();
return 0;
}
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time_d)
{
cout << "\n---------\n";
unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
int memoryForGramMatrix = sizeof(unsigned char) * SIZE_GRAM_MATRIX;
int memoryForBigVector = sizeof(unsigned char) * COUNT_OF_ELEMENTS_IN_SYSTEM;
for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
matrixGram[i] = 0;
unsigned char* systemOfVectors_GPU;
unsigned char* matrixGram_GPU;
hipEvent_t startCUDA, stopCUDA;
CHECK(hipEventCreate(&startCUDA));
CHECK(hipEventCreate(&stopCUDA));
CHECK(hipMalloc(&systemOfVectors_GPU, memoryForBigVector));
CHECK(hipMalloc(&matrixGram_GPU, memoryForGramMatrix));
CHECK(hipMemcpy(systemOfVectors_GPU, systemOfVectors, memoryForBigVector, hipMemcpyHostToDevice));
CHECK(hipMemcpy(matrixGram_GPU, matrixGram, memoryForGramMatrix, hipMemcpyHostToDevice));
CHECK(hipEventRecord(startCUDA, 0));
cout << "Calculate on DEVICE...\n";
int countOfBlocks = (SIZE_GRAM_MATRIX + 1023) / 1024;
hipLaunchKernelGGL(( calculate_GramMatrix_GPU), dim3(countOfBlocks), dim3(1024), 0, 0, systemOfVectors_GPU, matrixGram_GPU);
cout << "Count of blocks: " << countOfBlocks << endl;
hipEventRecord(stopCUDA, 0);
hipEventSynchronize(stopCUDA);
hipEventElapsedTime(&time_d, startCUDA, stopCUDA);
time_d /= 1000;
CHECK(hipMemcpy(matrixGram, matrixGram_GPU, memoryForGramMatrix, hipMemcpyDeviceToHost));
cout << "Done\n";
hipFree(systemOfVectors_GPU);
hipFree(matrixGram_GPU);
return matrixGram;
}
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time_h)
{
unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
for (int i = 0; i < SIZE_GRAM_MATRIX; i++) matrixGram[i] = 0;
cout << "Calculate on HOST...\n";
time_h = clock();
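// Only the upper triangle (including the diagonal) is computed explicitly: the
// `continue` below skips entries to the left of the diagonal, and the final
// store mirrors each value into its transposed position, since the Gram matrix
// is symmetric. Example for COUNT_OF_VECTORS_IN_SYSTEM = 12: i = 15 (row 1,
// col 3) has diagonal index 13, and 13 + (15 - 13) * 12 = 37, i.e. row 3,
// col 1. Note that the unsigned char accumulator wraps modulo 256; the GPU
// kernel accumulates in unsigned char as well, so both sides wrap identically.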
for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
{
int currentRow = (i / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_VECTORS_IN_SYSTEM;
int shiftCol = (i / COUNT_OF_VECTORS_IN_SYSTEM);
int currentIndexMainDiag = currentRow + shiftCol;
if (i < currentIndexMainDiag) continue;
unsigned char temp = 0;
for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
temp +=
systemOfVectors[(i / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j] *
systemOfVectors[(i % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j];
matrixGram[currentIndexMainDiag + (i - currentIndexMainDiag) * COUNT_OF_VECTORS_IN_SYSTEM] = matrixGram[i] = temp;
}
cout << "Done\n";
time_h /= CLOCKS_PER_SEC;
return matrixGram;
}
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
cout << "\nCheck...\n";
if (IsEqual(matrix_Host, matrix_Device, SIZE_GRAM_MATRIX))
cout << "That's right! :)\n";
else cout << "Wrong! :(\n";
}
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size)
{
for (int i = 0; i < size; i++)
if (firstVector[i] != secondVector[i])
return false;
return true;
}
unsigned char* GetRandomSystemOfVectors()
{
unsigned char* vector = new unsigned char[COUNT_OF_ELEMENTS_IN_SYSTEM];
for (int i = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++)
vector[i] = rand() % 9 + 1;
return vector;
}
void InfoResult(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
cout << "\nGram matrix CPU: " << endl;
PrintVector(matrix_Host, SIZE_GRAM_MATRIX);
cout << "\nGram matrix GPU: " << endl;
PrintVector(matrix_Device, SIZE_GRAM_MATRIX);
}
void PrintSystemOfVectors(unsigned char* systemOfVectors)
{
bool step = COUNT_OF_ELEMENTS_IN_SYSTEM < 10;
cout << "\nBig vector:\n\n";
for (int i = 0, j = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++, j++)
{
if (j == COUNT_OF_ELEMENTS_IN_VECTOR && step)
{
cout << endl;
j = 0;
}
cout << (int)systemOfVectors[i] << " ";
}
cout << endl;
}
void PrintVector(unsigned char* vector, size_t size)
{
if (PRINT_MATRIX)
{
for (int i = 0; i < COUNT_OF_VECTORS_IN_SYSTEM; i++)
{
for (int j = 0; j < COUNT_OF_VECTORS_IN_SYSTEM; j++)
{
cout << (int)vector[i * COUNT_OF_VECTORS_IN_SYSTEM + j] << "\t";
}
cout << endl;
}
}
else
{
for (int i = 0; i < size; i++)
cout << (int)vector[i] << " ";
cout << endl;
}
}
| 51c4985b3ac51dcc58e6cbb11af48e2e93e7e8e2.cu | #include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "device_launch_parameters.h"
#include <limits.h>
#define PRINT_MATRIX true
#define CHECK(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1);\
} }
#define MAX_MEMORY_VECTOR 104857600 //100 Mb
#define COUNT_OF_ELEMENTS_IN_SYSTEM 1024 //Count of elements in system of vectors
#define COUNT_OF_VECTORS_IN_SYSTEM 12 //Count of vectors in system
#define COUNT_OF_ELEMENTS_IN_VECTOR (COUNT_OF_ELEMENTS_IN_SYSTEM / COUNT_OF_VECTORS_IN_SYSTEM) //Count of elements in one vector
#define SIZE_GRAM_MATRIX (COUNT_OF_VECTORS_IN_SYSTEM * COUNT_OF_VECTORS_IN_SYSTEM)
using namespace std;
inline void Info()
{
cout << "Size of system: " << COUNT_OF_ELEMENTS_IN_SYSTEM
<< "\nCount of vectors: " << COUNT_OF_VECTORS_IN_SYSTEM
<< "\nCount of elements in one vector: " << COUNT_OF_ELEMENTS_IN_VECTOR << endl;
}
void InfoResult(unsigned char*, unsigned char*);
void PrintSystemOfVectors(unsigned char*);
void PrintVector(unsigned char*, size_t);
unsigned char* GetRandomSystemOfVectors();
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time);
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time);
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size);
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device);
__global__ void calculate_GramMatrix_GPU(unsigned char* systemOfVectors, unsigned char* gramMatrix)
{
__shared__ unsigned char cache[1024];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= SIZE_GRAM_MATRIX) return;
// cache lives in shared memory and is not zero-initialised, so clear this
// thread's accumulator before summing the dot product (otherwise the result
// depends on whatever was left in shared memory).
cache[threadIdx.x] = 0;
for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
{
cache[threadIdx.x] +=
systemOfVectors[(index / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j] *
systemOfVectors[(index % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j];
}
__syncthreads();
gramMatrix[index] = cache[threadIdx.x];
}
int main()
{
Info();
float timeCPU = 0.0f, timeGPU = 0.0f;
unsigned char* systemOfVectors = GetRandomSystemOfVectors();
bool isForPrint = COUNT_OF_ELEMENTS_IN_SYSTEM <= 2048;
if (isForPrint) PrintSystemOfVectors(systemOfVectors);
cout << "\nSize Gram matrix: " << SIZE_GRAM_MATRIX << "\n\n";
unsigned char* matrixGramCPU = GetGramMatrixCPU(systemOfVectors, timeCPU);
unsigned char* matrixGramGPU = GetGramMatrixGPU(systemOfVectors, timeGPU);
Check(matrixGramCPU, matrixGramGPU);
cout << "\n--------\n";
cout << "Time CPU: " << timeCPU << endl;
cout << "Time GPU: " << timeGPU << endl;
cout << "\n--------\n";
InfoResult(matrixGramCPU, matrixGramGPU);
cin.get();
return 0;
}
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time_d)
{
cout << "\n---------\n";
unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
int memoryForGramMatrix = sizeof(unsigned char) * SIZE_GRAM_MATRIX;
int memoryForBigVector = sizeof(unsigned char) * COUNT_OF_ELEMENTS_IN_SYSTEM;
for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
matrixGram[i] = 0;
unsigned char* systemOfVectors_GPU;
unsigned char* matrixGram_GPU;
cudaEvent_t startCUDA, stopCUDA;
CHECK(cudaEventCreate(&startCUDA));
CHECK(cudaEventCreate(&stopCUDA));
CHECK(cudaMalloc(&systemOfVectors_GPU, memoryForBigVector));
CHECK(cudaMalloc(&matrixGram_GPU, memoryForGramMatrix));
CHECK(cudaMemcpy(systemOfVectors_GPU, systemOfVectors, memoryForBigVector, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(matrixGram_GPU, matrixGram, memoryForGramMatrix, cudaMemcpyHostToDevice));
CHECK(cudaEventRecord(startCUDA, 0));
cout << "Calculate on DEVICE...\n";
int countOfBlocks = (SIZE_GRAM_MATRIX + 1023) / 1024;
calculate_GramMatrix_GPU<<<countOfBlocks, 1024>>>(systemOfVectors_GPU, matrixGram_GPU);
cout << "Count of blocks: " << countOfBlocks << endl;
cudaEventRecord(stopCUDA, 0);
cudaEventSynchronize(stopCUDA);
cudaEventElapsedTime(&time_d, startCUDA, stopCUDA);
time_d /= 1000;
CHECK(cudaMemcpy(matrixGram, matrixGram_GPU, memoryForGramMatrix, cudaMemcpyDeviceToHost));
cout << "Done\n";
cudaFree(systemOfVectors_GPU);
cudaFree(matrixGram_GPU);
return matrixGram;
}
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time_h)
{
unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
for (int i = 0; i < SIZE_GRAM_MATRIX; i++) matrixGram[i] = 0;
cout << "Calculate on HOST...\n";
time_h = clock();
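// Only the upper triangle (including the diagonal) is computed explicitly: the
// `continue` below skips entries to the left of the diagonal, and the final
// store mirrors each value into its transposed position, since the Gram matrix
// is symmetric. Example for COUNT_OF_VECTORS_IN_SYSTEM = 12: i = 15 (row 1,
// col 3) has diagonal index 13, and 13 + (15 - 13) * 12 = 37, i.e. row 3,
// col 1. Note that the unsigned char accumulator wraps modulo 256; the GPU
// kernel accumulates in unsigned char as well, so both sides wrap identically.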
for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
{
int currentRow = (i / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_VECTORS_IN_SYSTEM;
int shiftCol = (i / COUNT_OF_VECTORS_IN_SYSTEM);
int currentIndexMainDiag = currentRow + shiftCol;
if (i < currentIndexMainDiag) continue;
unsigned char temp = 0;
for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
temp +=
systemOfVectors[(i / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j] *
systemOfVectors[(i % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j];
matrixGram[currentIndexMainDiag + (i - currentIndexMainDiag) * COUNT_OF_VECTORS_IN_SYSTEM] = matrixGram[i] = temp;
}
cout << "Done\n";
time_h /= CLOCKS_PER_SEC;
return matrixGram;
}
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
cout << "\nCheck...\n";
if (IsEqual(matrix_Host, matrix_Device, SIZE_GRAM_MATRIX))
cout << "That's right! :)\n";
else cout << "Wrong! :(\n";
}
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size)
{
for (int i = 0; i < size; i++)
if (firstVector[i] != secondVector[i])
return false;
return true;
}
unsigned char* GetRandomSystemOfVectors()
{
unsigned char* vector = new unsigned char[COUNT_OF_ELEMENTS_IN_SYSTEM];
for (int i = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++)
vector[i] = rand() % 9 + 1;
return vector;
}
void InfoResult(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
cout << "\nGram matrix CPU: " << endl;
PrintVector(matrix_Host, SIZE_GRAM_MATRIX);
cout << "\nGram matrix GPU: " << endl;
PrintVector(matrix_Device, SIZE_GRAM_MATRIX);
}
void PrintSystemOfVectors(unsigned char* systemOfVectors)
{
bool step = COUNT_OF_ELEMENTS_IN_SYSTEM < 10;
cout << "\nBig vector:\n\n";
for (int i = 0, j = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++, j++)
{
if (j == COUNT_OF_ELEMENTS_IN_VECTOR && step)
{
cout << endl;
j = 0;
}
cout << (int)systemOfVectors[i] << " ";
}
cout << endl;
}
void PrintVector(unsigned char* vector, size_t size)
{
if (PRINT_MATRIX)
{
for (int i = 0; i < COUNT_OF_VECTORS_IN_SYSTEM; i++)
{
for (int j = 0; j < COUNT_OF_VECTORS_IN_SYSTEM; j++)
{
cout << (int)vector[i * COUNT_OF_VECTORS_IN_SYSTEM + j] << "\t";
}
cout << endl;
}
}
else
{
for (int i = 0; i < size; i++)
cout << (int)vector[i] << " ";
cout << endl;
}
}
|
b2c934cfdeb50491e4b22b8f375d3582752964cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_permute.h"
#include "hip/hip_fp16.h"
namespace anakin {
namespace saber {
const int TRANS_BLOCK_SIZE = 16;
template <typename Dtype>
__global__ void ker_permute_fwd(Dtype * out_data, const int num_axes,\
const int count, const int * permute_order,\
const int * new_steps, const int * old_steps,\
const Dtype* in_data)
{
CUDA_KERNEL_LOOP(tid, count){
int org_idx = tid;
int in_idx = 0;
#pragma unroll
for (int i = 0; i < num_axes; i++) {
int order = permute_order[i];
int new_step = new_steps[i];
int old_step = old_steps[order];
in_idx += (org_idx / new_step) * old_step;
org_idx %= new_step;
}
out_data[tid] = in_data[in_idx];
}
}
template <typename Dtype>
__global__ void ker_permute_fwd(Dtype * out_data, const int num_axes,\
const int count, const int * permute_order,\
const int * new_steps, const int * old_steps,\
const int * new_valid_shape,
const Dtype* in_data)
{
CUDA_KERNEL_LOOP(tid, count){
int in_idx = 0;
int out_idx = 0;
int new_valid_stride = 1;
#pragma unroll
for (int i = num_axes - 1; i >= 0; --i) {
int order = permute_order[i];
int new_step = new_steps[i];
int old_step = old_steps[order];
int id = (tid / new_valid_stride) % new_valid_shape[i];
in_idx += id * old_step;
out_idx += id * new_step;
new_valid_stride *= new_valid_shape[i];
}
out_data[out_idx] = in_data[in_idx];
}
}
/*in this kernel, we suppose img with format (1, h, w, c) tranform to (1, c, h, w),
and c = 3. out_h = c, out_w = h * w. each thread process one pixel*/
template<typename Dtype>
__global__ void ker_permute_fwd_transpose(Dtype * out_data, \
const int out_h, const int out_w, \
const Dtype* in_data)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float tile[3][CUDA_NUM_THREADS];
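// Each thread handles one pixel: it loads that pixel's three interleaved
// channel values into its own column of the shared tile and then writes them
// to the three planar output rows; consecutive threads touch consecutive
// output addresses, so the stores coalesce along out_w.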
if (tid < out_w) {
int offset = tid * out_h;
tile[0][threadIdx.x] = in_data[offset];
tile[1][threadIdx.x] = in_data[offset + 1];
tile[2][threadIdx.x] = in_data[offset + 2];
}
__syncthreads();
if (tid < out_w) {
out_data[0 *out_w + tid] = tile[0][threadIdx.x];
out_data[1 *out_w + tid] = tile[1][threadIdx.x];
out_data[2 *out_w + tid] = tile[2][threadIdx.x];
}
}
template<typename Dtype>
__global__ void ker_permute_fwd_transpose(Dtype * out_data, \
const int n,
const int c,
const int h,
const int w,
const int * out_stride,
const int * in_stride,
const Dtype* in_data)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float tile[3][CUDA_NUM_THREADS];
int out_w_id = tid % w;
int out_h_id = (tid / w) % h;
int out_n_id = tid / (h * w);
int out_offset = out_n_id * out_stride[0] + out_h_id * out_stride[2] + out_w_id * out_stride[3];
int in_offset = out_n_id * in_stride[0] + out_h_id * in_stride[1] + out_w_id * in_stride[2];
if (tid < n * h * w) {
tile[0][threadIdx.x] = in_data[in_offset];
tile[1][threadIdx.x] = in_data[in_offset + 1];
tile[2][threadIdx.x] = in_data[in_offset + 2];
}
__syncthreads();
if (tid < n * h * w ){
out_data[out_offset + out_stride[1] * 0] = tile[0][threadIdx.x];
out_data[out_offset + out_stride[1] * 1] = tile[1][threadIdx.x];
out_data[out_offset + out_stride[1] * 2] = tile[2][threadIdx.x];
}
}
/*in this kernel, we suppose img with format (1, c, h, w) tranform to (1, h, w, c),
and out_h = h*w, out_w = c. each thread process one data. we use share memory*/
template<typename Dtype>
__global__ void ker_transpose(Dtype * out_data, \
const int out_h, const int out_w, \
const Dtype* in_data)
{
__shared__ float tile[TRANS_BLOCK_SIZE][TRANS_BLOCK_SIZE];
int tid_x = threadIdx.x + blockIdx.x * blockDim.x;//in index
int tid_y = threadIdx.y + blockIdx.y * blockDim.y;//in index
if (tid_x < out_h && tid_y < out_w) {
tile[threadIdx.x][threadIdx.y] = in_data[tid_x + tid_y * out_h];
}
__syncthreads();
if (tid_x < out_h && tid_y < out_w) {
out_data[tid_x * out_w + tid_y] = tile[threadIdx.x][threadIdx.y];
}
}
template<typename Dtype>
__global__ void ker_nchw_to_nhwc(Dtype * out_data,
const int n,
const int c,
const int h,
const int w,
const int * out_stride,
const int * in_stride,
const Dtype* in_data)
{
__shared__ float tile[TRANS_BLOCK_SIZE][TRANS_BLOCK_SIZE];
int tid_x = threadIdx.x + blockIdx.x * blockDim.x;//in index
int tid_y = threadIdx.y + blockIdx.y * blockDim.y;//in index
int w_id = tid_y % w;
int h_id = tid_y / w;
int c_id = tid_x % c;
int n_id = tid_x / c;
int in_offset = n_id * in_stride[0] + c_id * in_stride[1] \
+ h_id * in_stride[2] + w_id * in_stride[3];
int out_offset = n_id * out_stride[0] + h_id * out_stride[1] + \
w_id * out_stride[2] + c_id * out_stride[3];
if (tid_x < n*c && tid_y < h*w) {
tile[threadIdx.x][threadIdx.y] = in_data[in_offset];
}
__syncthreads();
if (tid_x < n*c && tid_y < h*w) {
out_data[out_offset] = tile[threadIdx.x][threadIdx.y];
}
}
template <DataType OpDtype,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberPermute<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\
const std::vector<DataTensor_in *>& inputs, \
std::vector<DataTensor_out *>& outputs, \
PermuteParam<OpTensor>& param) {
hipStream_t cuda_stream = this->_ctx.get_compute_stream();
const InDataType * in_data = inputs[0]->data();
OutDataType * out_data = outputs[0]->mutable_data();
int count = outputs[0]->valid_size();
const int * permute_order = _permute_order.data();
const int * new_steps = _out_steps.data();
const int * old_steps = _in_steps.data();
const int * out_valid_shape = _out_valid_shape.data();
std::vector<int> permute_order_nhwc_to_nchw = {0, 3, 1, 2};
PermuteParam<OpTensor> param_nhwc_to_nchw(permute_order_nhwc_to_nchw);
std::vector<int> permute_order_nchw_to_nhwc = {0, 2, 3, 1};
PermuteParam<OpTensor> param_nchw_to_nhwc(permute_order_nchw_to_nhwc);
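// The branches below pick a specialised kernel for the common cases
// (NHWC->NCHW with a single image and 3 channels, or NCHW->NHWC with a single
// image) and fall back to the generic ker_permute_fwd otherwise; the outer
// if distinguishes tensors with contiguous memory from those that need the
// explicit stride arrays.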
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
if (_need_permute) {
if (inputs[0]->num() == 1 && inputs[0]->width() == 3
&& param == param_nhwc_to_nchw) {
int out_w = outputs[0]->width() * outputs[0]->height();
int out_h = outputs[0]->channel();
hipLaunchKernelGGL(( ker_permute_fwd_transpose<OpDataType>)\
, dim3(CUDA_GET_BLOCKS(out_w)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \
out_data, out_h, out_w, in_data);
} else if (inputs[0]->num() == 1 && param == param_nchw_to_nhwc) {
int out_h = inputs[0]->width() * inputs[0]->height();
int out_w = inputs[0]->channel();
dim3 block_size(TRANS_BLOCK_SIZE, TRANS_BLOCK_SIZE);
dim3 grid_size((out_h + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE, (out_w + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE);
hipLaunchKernelGGL(( ker_transpose<OpDataType>)\
, dim3(grid_size), dim3(block_size), 0, cuda_stream, \
out_data, out_h, out_w, in_data);
} else {
hipLaunchKernelGGL(( ker_permute_fwd<OpDataType>)\
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \
out_data, _num_axes, count, permute_order, \
new_steps, old_steps, in_data);
}
} else {
//outputs[0]->share_from(inputs[0]);
}
} else {
if (_need_permute) {
if (inputs[0]->num() == 1 && inputs[0]->width() == 3
&& param == param_nhwc_to_nchw) {
int out_w = outputs[0]->width() * outputs[0]->height();
int out_h = outputs[0]->channel();
hipLaunchKernelGGL(( ker_permute_fwd_transpose<OpDataType>)\
, dim3(CUDA_GET_BLOCKS(out_w)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \
out_data, outputs[0]->num(), outputs[0]->channel(), \
outputs[0]->height(), outputs[0]->width(),
new_steps, old_steps, in_data);
} else if (inputs[0]->num() == 1 && param == param_nchw_to_nhwc) {
dim3 block_size(TRANS_BLOCK_SIZE, TRANS_BLOCK_SIZE);
dim3 grid_size((inputs[0]->num() * inputs[0]->channel() + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE,
(inputs[0]->height() * inputs[0]->width() + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE);
hipLaunchKernelGGL(( ker_nchw_to_nhwc<OpDataType>)\
, dim3(grid_size), dim3(block_size), 0, cuda_stream, \
out_data, inputs[0]->num(), inputs[0]->channel(),\
inputs[0]->height(), inputs[0]->width(),\
new_steps, old_steps, in_data);
} else {
hipLaunchKernelGGL(( ker_permute_fwd<OpDataType>)\
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \
out_data, _num_axes, count, permute_order, \
new_steps, old_steps, in_data);
}
} else {
//outputs[0]->share_from(inputs[0]);
}
}
return SaberSuccess;
}
}
}
| b2c934cfdeb50491e4b22b8f375d3582752964cb.cu | #include "saber/funcs/impl/cuda/saber_permute.h"
#include "cuda_fp16.h"
namespace anakin {
namespace saber {
const int TRANS_BLOCK_SIZE = 16;
template <typename Dtype>
__global__ void ker_permute_fwd(Dtype * out_data, const int num_axes,\
const int count, const int * permute_order,\
const int * new_steps, const int * old_steps,\
const Dtype* in_data)
{
CUDA_KERNEL_LOOP(tid, count){
int org_idx = tid;
int in_idx = 0;
#pragma unroll
for (int i = 0; i < num_axes; i++) {
int order = permute_order[i];
int new_step = new_steps[i];
int old_step = old_steps[order];
in_idx += (org_idx / new_step) * old_step;
org_idx %= new_step;
}
out_data[tid] = in_data[in_idx];
}
}
template <typename Dtype>
__global__ void ker_permute_fwd(Dtype * out_data, const int num_axes,\
const int count, const int * permute_order,\
const int * new_steps, const int * old_steps,\
const int * new_valid_shape,
const Dtype* in_data)
{
CUDA_KERNEL_LOOP(tid, count){
int in_idx = 0;
int out_idx = 0;
int new_valid_stride = 1;
#pragma unroll
for (int i = num_axes - 1; i >= 0; --i) {
int order = permute_order[i];
int new_step = new_steps[i];
int old_step = old_steps[order];
int id = (tid / new_valid_stride) % new_valid_shape[i];
in_idx += id * old_step;
out_idx += id * new_step;
new_valid_stride *= new_valid_shape[i];
}
out_data[out_idx] = in_data[in_idx];
}
}
/*in this kernel, we suppose img with format (1, h, w, c) tranform to (1, c, h, w),
and c = 3. out_h = c, out_w = h * w. each thread process one pixel*/
template<typename Dtype>
__global__ void ker_permute_fwd_transpose(Dtype * out_data, \
const int out_h, const int out_w, \
const Dtype* in_data)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float tile[3][CUDA_NUM_THREADS];
if (tid < out_w) {
int offset = tid * out_h;
tile[0][threadIdx.x] = in_data[offset];
tile[1][threadIdx.x] = in_data[offset + 1];
tile[2][threadIdx.x] = in_data[offset + 2];
}
__syncthreads();
if (tid < out_w) {
out_data[0 *out_w + tid] = tile[0][threadIdx.x];
out_data[1 *out_w + tid] = tile[1][threadIdx.x];
out_data[2 *out_w + tid] = tile[2][threadIdx.x];
}
}
template<typename Dtype>
__global__ void ker_permute_fwd_transpose(Dtype * out_data, \
const int n,
const int c,
const int h,
const int w,
const int * out_stride,
const int * in_stride,
const Dtype* in_data)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float tile[3][CUDA_NUM_THREADS];
int out_w_id = tid % w;
int out_h_id = (tid / w) % h;
int out_n_id = tid / (h * w);
int out_offset = out_n_id * out_stride[0] + out_h_id * out_stride[2] + out_w_id * out_stride[3];
int in_offset = out_n_id * in_stride[0] + out_h_id * in_stride[1] + out_w_id * in_stride[2];
if (tid < n * h * w) {
tile[0][threadIdx.x] = in_data[in_offset];
tile[1][threadIdx.x] = in_data[in_offset + 1];
tile[2][threadIdx.x] = in_data[in_offset + 2];
}
__syncthreads();
if (tid < n * h * w ){
out_data[out_offset + out_stride[1] * 0] = tile[0][threadIdx.x];
out_data[out_offset + out_stride[1] * 1] = tile[1][threadIdx.x];
out_data[out_offset + out_stride[1] * 2] = tile[2][threadIdx.x];
}
}
/*in this kernel, we suppose img with format (1, c, h, w) tranform to (1, h, w, c),
and out_h = h*w, out_w = c. each thread process one data. we use share memory*/
template<typename Dtype>
__global__ void ker_transpose(Dtype * out_data, \
const int out_h, const int out_w, \
const Dtype* in_data)
{
__shared__ float tile[TRANS_BLOCK_SIZE][TRANS_BLOCK_SIZE];
int tid_x = threadIdx.x + blockIdx.x * blockDim.x;//in index
int tid_y = threadIdx.y + blockIdx.y * blockDim.y;//in index
if (tid_x < out_h && tid_y < out_w) {
tile[threadIdx.x][threadIdx.y] = in_data[tid_x + tid_y * out_h];
}
__syncthreads();
if (tid_x < out_h && tid_y < out_w) {
out_data[tid_x * out_w + tid_y] = tile[threadIdx.x][threadIdx.y];
}
}
template<typename Dtype>
__global__ void ker_nchw_to_nhwc(Dtype * out_data,
const int n,
const int c,
const int h,
const int w,
const int * out_stride,
const int * in_stride,
const Dtype* in_data)
{
__shared__ float tile[TRANS_BLOCK_SIZE][TRANS_BLOCK_SIZE];
int tid_x = threadIdx.x + blockIdx.x * blockDim.x;//in index
int tid_y = threadIdx.y + blockIdx.y * blockDim.y;//in index
int w_id = tid_y % w;
int h_id = tid_y / w;
int c_id = tid_x % c;
int n_id = tid_x / c;
int in_offset = n_id * in_stride[0] + c_id * in_stride[1] \
+ h_id * in_stride[2] + w_id * in_stride[3];
int out_offset = n_id * out_stride[0] + h_id * out_stride[1] + \
w_id * out_stride[2] + c_id * out_stride[3];
if (tid_x < n*c && tid_y < h*w) {
tile[threadIdx.x][threadIdx.y] = in_data[in_offset];
}
__syncthreads();
if (tid_x < n*c && tid_y < h*w) {
out_data[out_offset] = tile[threadIdx.x][threadIdx.y];
}
}
template <DataType OpDtype,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberPermute<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\
const std::vector<DataTensor_in *>& inputs, \
std::vector<DataTensor_out *>& outputs, \
PermuteParam<OpTensor>& param) {
cudaStream_t cuda_stream = this->_ctx.get_compute_stream();
const InDataType * in_data = inputs[0]->data();
OutDataType * out_data = outputs[0]->mutable_data();
int count = outputs[0]->valid_size();
const int * permute_order = _permute_order.data();
const int * new_steps = _out_steps.data();
const int * old_steps = _in_steps.data();
const int * out_valid_shape = _out_valid_shape.data();
std::vector<int> permute_order_nhwc_to_nchw = {0, 3, 1, 2};
PermuteParam<OpTensor> param_nhwc_to_nchw(permute_order_nhwc_to_nchw);
std::vector<int> permute_order_nchw_to_nhwc = {0, 2, 3, 1};
PermuteParam<OpTensor> param_nchw_to_nhwc(permute_order_nchw_to_nhwc);
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
if (_need_permute) {
if (inputs[0]->num() == 1 && inputs[0]->width() == 3
&& param == param_nhwc_to_nchw) {
int out_w = outputs[0]->width() * outputs[0]->height();
int out_h = outputs[0]->channel();
ker_permute_fwd_transpose<OpDataType>\
<<<CUDA_GET_BLOCKS(out_w), CUDA_NUM_THREADS, 0, cuda_stream>>>(\
out_data, out_h, out_w, in_data);
} else if (inputs[0]->num() == 1 && param == param_nchw_to_nhwc) {
int out_h = inputs[0]->width() * inputs[0]->height();
int out_w = inputs[0]->channel();
dim3 block_size(TRANS_BLOCK_SIZE, TRANS_BLOCK_SIZE);
dim3 grid_size((out_h + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE, (out_w + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE);
ker_transpose<OpDataType>\
<<<grid_size, block_size, 0, cuda_stream>>>(\
out_data, out_h, out_w, in_data);
} else {
ker_permute_fwd<OpDataType>\
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\
out_data, _num_axes, count, permute_order, \
new_steps, old_steps, in_data);
}
} else {
//outputs[0]->share_from(inputs[0]);
}
} else {
if (_need_permute) {
if (inputs[0]->num() == 1 && inputs[0]->width() == 3
&& param == param_nhwc_to_nchw) {
int out_w = outputs[0]->width() * outputs[0]->height();
int out_h = outputs[0]->channel();
ker_permute_fwd_transpose<OpDataType>\
<<<CUDA_GET_BLOCKS(out_w), CUDA_NUM_THREADS, 0, cuda_stream>>>(\
out_data, outputs[0]->num(), outputs[0]->channel(), \
outputs[0]->height(), outputs[0]->width(),
new_steps, old_steps, in_data);
} else if (inputs[0]->num() == 1 && param == param_nchw_to_nhwc) {
dim3 block_size(TRANS_BLOCK_SIZE, TRANS_BLOCK_SIZE);
dim3 grid_size((inputs[0]->num() * inputs[0]->channel() + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE,
(inputs[0]->height() * inputs[0]->width() + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE);
ker_nchw_to_nhwc<OpDataType>\
<<<grid_size, block_size, 0, cuda_stream>>>(\
out_data, inputs[0]->num(), inputs[0]->channel(),\
inputs[0]->height(), inputs[0]->width(),\
new_steps, old_steps, in_data);
} else {
ker_permute_fwd<OpDataType>\
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\
out_data, _num_axes, count, permute_order, \
new_steps, old_steps, in_data);
}
} else {
//outputs[0]->share_from(inputs[0]);
}
}
return SaberSuccess;
}
}
}
|
67f09c9ecd7eaa2acb27afa9b9d59ea295acbaee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#####################################################################
//Removed For Anonymity: Copyright Authors of Submission pap173s1
// Distributed under the FreeBSD license (see license.txt)
//#####################################################################
#include <iostream>
#include "Laplace_Helper_CUDA.h"
#include <SPGrid_Fluids/Simulation/FLUIDS_SIMULATION_FLAGS.h>
using namespace SPGrid;
using namespace PhysBAM;
#define THREADBLOCK 1024
#define PREFETCH 1024
template <class T, int log2_struct,int xsize,int ysize,int zsize,class T_offset_ptr>
__global__ void Laplace_Kernel_3D(const unsigned* masks, T* out,const T* in,const T scale_nonuniform,const T scale_uniform,
const T_offset_ptr* offsets,
const T_offset_ptr* const b_x_minus,
const T_offset_ptr* const b_x_plus,
const T_offset_ptr* const b_y_minus,
const T_offset_ptr* const b_y_plus,
const T_offset_ptr* const b_z_minus,
const T_offset_ptr* const b_z_plus,
int max_block){
enum {
DATABLOCK=xsize*ysize*zsize,
span=THREADBLOCK/DATABLOCK,
xstride=ysize*zsize,
ystride=zsize,
zstride=1
};
const unsigned int block = threadIdx.x / DATABLOCK;
const unsigned int entry = threadIdx.x % DATABLOCK;
const unsigned int z = entry % zsize;
const unsigned int y = entry / zsize % ysize;
const unsigned int x = entry / zsize / ysize;
typedef SPGrid_Mask<log2_struct, NextLogTwo<sizeof(T)>::value,3> T_MASK;
__shared__ T_offset_ptr block_index[PREFETCH];
__shared__ T_offset_ptr block_minus_x_index[PREFETCH];
__shared__ T_offset_ptr block_plus_x_index[PREFETCH];
__shared__ T_offset_ptr block_minus_y_index[PREFETCH];
__shared__ T_offset_ptr block_plus_y_index[PREFETCH];
__shared__ T_offset_ptr block_minus_z_index[PREFETCH];
__shared__ T_offset_ptr block_plus_z_index[PREFETCH];
if(threadIdx.x < PREFETCH)
if(blockIdx.x * PREFETCH + threadIdx.x < max_block){
block_index[threadIdx.x] = offsets[blockIdx.x * PREFETCH + threadIdx.x];
block_minus_x_index[threadIdx.x]=b_x_minus[blockIdx.x * PREFETCH + threadIdx.x];
block_plus_x_index[threadIdx.x]=b_x_plus[blockIdx.x * PREFETCH + threadIdx.x];
block_minus_y_index[threadIdx.x]=b_y_minus[blockIdx.x * PREFETCH + threadIdx.x];
block_plus_y_index[threadIdx.x]=b_y_plus[blockIdx.x * PREFETCH + threadIdx.x];
block_minus_z_index[threadIdx.x]=b_z_minus[blockIdx.x * PREFETCH + threadIdx.x];
block_plus_z_index[threadIdx.x]=b_z_plus[blockIdx.x * PREFETCH + threadIdx.x];
/*block_minus_x_index[threadIdx.x]=T_MASK::Packed_OffsetXdim_Device<-xsize>(block_index[threadIdx.x]);
block_plus_x_index[threadIdx.x]=T_MASK::Packed_OffsetXdim_Device<+xsize>(block_index[threadIdx.x]);
block_minus_y_index[threadIdx.x]=T_MASK::Packed_OffsetYdim_Device<-ysize>(block_index[threadIdx.x]);
block_plus_y_index[threadIdx.x]=T_MASK::Packed_OffsetYdim_Device<+ysize>(block_index[threadIdx.x]);
block_minus_z_index[threadIdx.x]=T_MASK::Packed_OffsetZdim_Device<-zsize>(block_index[threadIdx.x]);
block_plus_z_index[threadIdx.x]=T_MASK::Packed_OffsetZdim_Device<+zsize>(block_index[threadIdx.x]);*/
}
__syncthreads();
for(int i = 0;i < PREFETCH;i += span){
if (blockIdx.x * PREFETCH + i + block < max_block){
T* out_base = reinterpret_cast<T*>((unsigned long)out + (unsigned long)block_index[i + block]);
unsigned mask_value = reinterpret_cast<unsigned*>((unsigned long)masks + (unsigned long)block_index[i + block])[entry];
if(mask_value & (SPGrid_Cell_Type_Active|SPGrid_Cell_Type_Ghost)){
T center_value = reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry];
T& x_minus_value = (x==0)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_minus_x_index[i + block])[entry+(xsize-1)*xstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry-xstride];
T& x_plus_value = (x==xsize-1)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_plus_x_index[i + block])[entry-(xsize-1)*xstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry+xstride];
T& y_minus_value = (y==0)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_minus_y_index[i + block])[entry+(ysize-1)*ystride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry-ystride];
T& y_plus_value = (y==ysize-1)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_plus_y_index[i + block])[entry-(ysize-1)*ystride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry+ystride];
T& z_minus_value = (z==0)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_minus_z_index[i + block])[entry+(zsize-1)*zstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry-zstride];
T& z_plus_value = (z==zsize-1)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_plus_z_index[i + block])[entry-(zsize-1)*zstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry+zstride];
T result=0;
T x_minus = (x_minus_value - center_value);
T x_plus = (x_plus_value - center_value);
T y_minus = (y_minus_value - center_value);
T y_plus = (y_plus_value - center_value);
T z_minus = (z_minus_value - center_value);
T z_plus = (z_plus_value - center_value);
if (mask_value & (SPGrid_Face_Minus_X_Scaled))
result += scale_nonuniform * x_minus;
else if (mask_value & (SPGrid_Face_Minus_X_Active))
result += scale_uniform * x_minus;
if (mask_value & (SPGrid_Face_Plus_X_Scaled))
result += scale_nonuniform * x_plus;
else if (mask_value & (SPGrid_Face_Plus_X_Active))
result += scale_uniform * x_plus;
if (mask_value & (SPGrid_Face_Minus_Y_Scaled))
result += scale_nonuniform * y_minus;
else if (mask_value & (SPGrid_Face_Minus_Y_Active))
result += scale_uniform * y_minus;
if (mask_value & (SPGrid_Face_Plus_Y_Scaled))
result += scale_nonuniform * y_plus;
else if (mask_value & (SPGrid_Face_Plus_Y_Active))
result += scale_uniform * y_plus;
if (mask_value & (SPGrid_Face_Minus_Z_Scaled))
result += scale_nonuniform * z_minus;
else if (mask_value & (SPGrid_Face_Minus_Z_Active))
result += scale_uniform * z_minus;
if (mask_value & (SPGrid_Face_Plus_Z_Scaled))
result += scale_nonuniform * z_plus;
else if (mask_value & (SPGrid_Face_Plus_Z_Active))
result += scale_uniform * z_plus;
out_base[entry] = result;
}
}
}
}
//#####################################################################
// Constructor 3D
//#####################################################################
template <class T, int log2_struct,class T_offset_ptr>
Laplace_Helper_CUDA<T,log2_struct,3,T_offset_ptr>::Laplace_Helper_CUDA(T* const x_input,const T* const y_input,const unsigned* const mask_input,
const T_offset_ptr* const b_input,
const T_offset_ptr* const b_x_minus_input,
const T_offset_ptr* const b_x_plus_input,
const T_offset_ptr* const b_y_minus_input,
const T_offset_ptr* const b_y_plus_input,
const T_offset_ptr* const b_z_minus_input,
const T_offset_ptr* const b_z_plus_input,
const int size_input,const T scale_uniform_in,const T scale_nonuniform_in)
:x(x_input),y(y_input),mask(mask_input),
b(b_input),
b_x_minus(b_x_minus_input),b_x_plus(b_x_plus_input),
b_y_minus(b_y_minus_input),b_y_plus(b_y_plus_input),
b_z_minus(b_z_minus_input),b_z_plus(b_z_plus_input),
size(size_input),scale_uniform(scale_uniform_in),scale_nonuniform(scale_nonuniform_in)
{
}
//#####################################################################
// Function Run_Index_Range
//#####################################################################
// T_MASK corresponds to the mask for the data (not the mask channel)
template <class T,int log2_struct,class T_offset_ptr>
void Laplace_Helper_CUDA<T,log2_struct,3,T_offset_ptr>::Run_Index_Range(const unsigned int index_start,const unsigned int index_end)
{
int number_of_blocks=index_end-index_start+1;
hipLaunchKernelGGL(( Laplace_Kernel_3D<T,log2_struct,block_xsize,block_ysize,block_zsize,T_offset_ptr>)
        , dim3((number_of_blocks%PREFETCH)?(number_of_blocks/PREFETCH+1):(number_of_blocks/PREFETCH)),dim3(THREADBLOCK), 0, 0,  // grid = ceil(number_of_blocks / PREFETCH): one thread block per batch of PREFETCH SPGrid blocks
mask,x,y,scale_nonuniform,scale_uniform,
b+index_start,
b_x_minus+index_start,b_x_plus+index_start,
b_y_minus+index_start,b_y_plus+index_start,
b_z_minus+index_start,b_z_plus+index_start,
number_of_blocks);
hipDeviceSynchronize();
}
//template class Laplace_Helper_CUDA<float,5,2>;
//template class Laplace_Helper_CUDA<float,6,2>;
template class Laplace_Helper_CUDA<float,4,3,unsigned int>;
//template class Laplace_Helper_CUDA<float,5,3>;
//template class Laplace_Helper_CUDA<float,6,3>;
| 67f09c9ecd7eaa2acb27afa9b9d59ea295acbaee.cu | //#####################################################################
//Removed For Anonymity: Copyright Authors of Submission pap173s1
// Distributed under the FreeBSD license (see license.txt)
//#####################################################################
#include <iostream>
#include "Laplace_Helper_CUDA.h"
#include <SPGrid_Fluids/Simulation/FLUIDS_SIMULATION_FLAGS.h>
using namespace SPGrid;
using namespace PhysBAM;
#define THREADBLOCK 1024
#define PREFETCH 1024
template <class T, int log2_struct,int xsize,int ysize,int zsize,class T_offset_ptr>
__global__ void Laplace_Kernel_3D(const unsigned* masks, T* out,const T* in,const T scale_nonuniform,const T scale_uniform,
const T_offset_ptr* offsets,
const T_offset_ptr* const b_x_minus,
const T_offset_ptr* const b_x_plus,
const T_offset_ptr* const b_y_minus,
const T_offset_ptr* const b_y_plus,
const T_offset_ptr* const b_z_minus,
const T_offset_ptr* const b_z_plus,
int max_block){
enum {
DATABLOCK=xsize*ysize*zsize,
span=THREADBLOCK/DATABLOCK,
xstride=ysize*zsize,
ystride=zsize,
zstride=1
};
const unsigned int block = threadIdx.x / DATABLOCK;
const unsigned int entry = threadIdx.x % DATABLOCK;
const unsigned int z = entry % zsize;
const unsigned int y = entry / zsize % ysize;
const unsigned int x = entry / zsize / ysize;
typedef SPGrid_Mask<log2_struct, NextLogTwo<sizeof(T)>::value,3> T_MASK;
__shared__ T_offset_ptr block_index[PREFETCH];
__shared__ T_offset_ptr block_minus_x_index[PREFETCH];
__shared__ T_offset_ptr block_plus_x_index[PREFETCH];
__shared__ T_offset_ptr block_minus_y_index[PREFETCH];
__shared__ T_offset_ptr block_plus_y_index[PREFETCH];
__shared__ T_offset_ptr block_minus_z_index[PREFETCH];
__shared__ T_offset_ptr block_plus_z_index[PREFETCH];
if(threadIdx.x < PREFETCH)
if(blockIdx.x * PREFETCH + threadIdx.x < max_block){
block_index[threadIdx.x] = offsets[blockIdx.x * PREFETCH + threadIdx.x];
block_minus_x_index[threadIdx.x]=b_x_minus[blockIdx.x * PREFETCH + threadIdx.x];
block_plus_x_index[threadIdx.x]=b_x_plus[blockIdx.x * PREFETCH + threadIdx.x];
block_minus_y_index[threadIdx.x]=b_y_minus[blockIdx.x * PREFETCH + threadIdx.x];
block_plus_y_index[threadIdx.x]=b_y_plus[blockIdx.x * PREFETCH + threadIdx.x];
block_minus_z_index[threadIdx.x]=b_z_minus[blockIdx.x * PREFETCH + threadIdx.x];
block_plus_z_index[threadIdx.x]=b_z_plus[blockIdx.x * PREFETCH + threadIdx.x];
/*block_minus_x_index[threadIdx.x]=T_MASK::Packed_OffsetXdim_Device<-xsize>(block_index[threadIdx.x]);
block_plus_x_index[threadIdx.x]=T_MASK::Packed_OffsetXdim_Device<+xsize>(block_index[threadIdx.x]);
block_minus_y_index[threadIdx.x]=T_MASK::Packed_OffsetYdim_Device<-ysize>(block_index[threadIdx.x]);
block_plus_y_index[threadIdx.x]=T_MASK::Packed_OffsetYdim_Device<+ysize>(block_index[threadIdx.x]);
block_minus_z_index[threadIdx.x]=T_MASK::Packed_OffsetZdim_Device<-zsize>(block_index[threadIdx.x]);
block_plus_z_index[threadIdx.x]=T_MASK::Packed_OffsetZdim_Device<+zsize>(block_index[threadIdx.x]);*/
}
__syncthreads();
for(int i = 0;i < PREFETCH;i += span){
if (blockIdx.x * PREFETCH + i + block < max_block){
T* out_base = reinterpret_cast<T*>((unsigned long)out + (unsigned long)block_index[i + block]);
unsigned mask_value = reinterpret_cast<unsigned*>((unsigned long)masks + (unsigned long)block_index[i + block])[entry];
if(mask_value & (SPGrid_Cell_Type_Active|SPGrid_Cell_Type_Ghost)){
T center_value = reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry];
T& x_minus_value = (x==0)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_minus_x_index[i + block])[entry+(xsize-1)*xstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry-xstride];
T& x_plus_value = (x==xsize-1)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_plus_x_index[i + block])[entry-(xsize-1)*xstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry+xstride];
T& y_minus_value = (y==0)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_minus_y_index[i + block])[entry+(ysize-1)*ystride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry-ystride];
T& y_plus_value = (y==ysize-1)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_plus_y_index[i + block])[entry-(ysize-1)*ystride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry+ystride];
T& z_minus_value = (z==0)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_minus_z_index[i + block])[entry+(zsize-1)*zstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry-zstride];
T& z_plus_value = (z==zsize-1)
? reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_plus_z_index[i + block])[entry-(zsize-1)*zstride]
: reinterpret_cast<T*>((unsigned long)in + (unsigned long)block_index[i + block])[entry+zstride];
T result=0;
T x_minus = (x_minus_value - center_value);
T x_plus = (x_plus_value - center_value);
T y_minus = (y_minus_value - center_value);
T y_plus = (y_plus_value - center_value);
T z_minus = (z_minus_value - center_value);
T z_plus = (z_plus_value - center_value);
if (mask_value & (SPGrid_Face_Minus_X_Scaled))
result += scale_nonuniform * x_minus;
else if (mask_value & (SPGrid_Face_Minus_X_Active))
result += scale_uniform * x_minus;
if (mask_value & (SPGrid_Face_Plus_X_Scaled))
result += scale_nonuniform * x_plus;
else if (mask_value & (SPGrid_Face_Plus_X_Active))
result += scale_uniform * x_plus;
if (mask_value & (SPGrid_Face_Minus_Y_Scaled))
result += scale_nonuniform * y_minus;
else if (mask_value & (SPGrid_Face_Minus_Y_Active))
result += scale_uniform * y_minus;
if (mask_value & (SPGrid_Face_Plus_Y_Scaled))
result += scale_nonuniform * y_plus;
else if (mask_value & (SPGrid_Face_Plus_Y_Active))
result += scale_uniform * y_plus;
if (mask_value & (SPGrid_Face_Minus_Z_Scaled))
result += scale_nonuniform * z_minus;
else if (mask_value & (SPGrid_Face_Minus_Z_Active))
result += scale_uniform * z_minus;
if (mask_value & (SPGrid_Face_Plus_Z_Scaled))
result += scale_nonuniform * z_plus;
else if (mask_value & (SPGrid_Face_Plus_Z_Active))
result += scale_uniform * z_plus;
out_base[entry] = result;
}
}
}
}
//#####################################################################
// Constructor 3D
//#####################################################################
template <class T, int log2_struct,class T_offset_ptr>
Laplace_Helper_CUDA<T,log2_struct,3,T_offset_ptr>::Laplace_Helper_CUDA(T* const x_input,const T* const y_input,const unsigned* const mask_input,
const T_offset_ptr* const b_input,
const T_offset_ptr* const b_x_minus_input,
const T_offset_ptr* const b_x_plus_input,
const T_offset_ptr* const b_y_minus_input,
const T_offset_ptr* const b_y_plus_input,
const T_offset_ptr* const b_z_minus_input,
const T_offset_ptr* const b_z_plus_input,
const int size_input,const T scale_uniform_in,const T scale_nonuniform_in)
:x(x_input),y(y_input),mask(mask_input),
b(b_input),
b_x_minus(b_x_minus_input),b_x_plus(b_x_plus_input),
b_y_minus(b_y_minus_input),b_y_plus(b_y_plus_input),
b_z_minus(b_z_minus_input),b_z_plus(b_z_plus_input),
size(size_input),scale_uniform(scale_uniform_in),scale_nonuniform(scale_nonuniform_in)
{
}
//#####################################################################
// Function Run_Index_Range
//#####################################################################
// T_MASK corresponds to the mask for the data (not the mask channel)
template <class T,int log2_struct,class T_offset_ptr>
void Laplace_Helper_CUDA<T,log2_struct,3,T_offset_ptr>::Run_Index_Range(const unsigned int index_start,const unsigned int index_end)
{
int number_of_blocks=index_end-index_start+1;
Laplace_Kernel_3D<T,log2_struct,block_xsize,block_ysize,block_zsize,T_offset_ptr>
    <<<(number_of_blocks%PREFETCH)?(number_of_blocks/PREFETCH+1):(number_of_blocks/PREFETCH),THREADBLOCK>>> // grid = ceil(number_of_blocks / PREFETCH): one thread block per batch of PREFETCH SPGrid blocks
(mask,x,y,scale_nonuniform,scale_uniform,
b+index_start,
b_x_minus+index_start,b_x_plus+index_start,
b_y_minus+index_start,b_y_plus+index_start,
b_z_minus+index_start,b_z_plus+index_start,
number_of_blocks);
cudaDeviceSynchronize();
}
//template class Laplace_Helper_CUDA<float,5,2>;
//template class Laplace_Helper_CUDA<float,6,2>;
template class Laplace_Helper_CUDA<float,4,3,unsigned int>;
//template class Laplace_Helper_CUDA<float,5,3>;
//template class Laplace_Helper_CUDA<float,6,3>;
|
7b5c94858b6108448e90d6187581f6666243a4af.hip | // !!! This is a file automatically generated by hipify!!!
#include "GpuProjectionProcessing.cuh"
#include "GpuUtmWgsTransform.cuh"
#include "GpuTimer_hip.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
using namespace winGpu;
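// GPU conversion of raster pixel grids between UTM and WGS84 (lat/lon) coordinates.
// The host wrappers below process the raster in chunks of whole rows so the device
// buffers never hold more than a fixed number of coordinates at a time, and they
// return the GPU time measured by the timer macro.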
__global__ void applyTransformUtmToWgsCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, bool southhemi, double* lon, double* lat)
{
int h = blockDim.x * blockIdx.x + threadIdx.x;
int w = blockDim.y * blockIdx.y + threadIdx.y;
if (height <= h || width <= w)
{
return;
}
double x = xOrigin + xPixelSize * w;
double y = yOrigin + yPixelSize * h;
double newLon = 0.0;
double newLat = 0.0;
UtmXYToLatLonGpu(x, y, zone, southhemi, newLon, newLat);
lon[h * width + w] = newLon;
lat[h * width + w] = newLat;
}
double winGpu::doTransformUtmToWgsCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, bool southhemi, double* lon, double* lat)
{
const size_t maxAvaliableCoords = 1500000;
int countRowsPerIter = maxAvaliableCoords / width;
int countIter = height / countRowsPerIter + 1;
const size_t size = width * countRowsPerIter;
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(countRowsPerIter / threadsPerBlock.x + 1, width / threadsPerBlock.y + 1);
double* newLon = new double[size];
double* newLat = new double[size];
double* dev_lon = 0;
double* dev_lat = 0;
float time;
hipSetDevice(0);
hipMalloc((void**)&dev_lon, size * sizeof(double));
hipMalloc((void**)&dev_lat, size * sizeof(double));
GPU_TIMER_START;
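    // Process the raster chunk by chunk: shift the Y origin for each pass, run the
    // transform kernel on the device, then copy only the rows that are still inside
    // the image back into the output arrays.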
for (int i = 0; i < countIter; i++)
{
double newYOrigin = yOrigin + i * yPixelSize * countRowsPerIter;
applyTransformUtmToWgsCoordsGpu << <numBlocks, threadsPerBlock >> > (xOrigin, newYOrigin,
xPixelSize, yPixelSize, countRowsPerIter, width, zone, southhemi, dev_lon, dev_lat);
hipDeviceSynchronize();
hipMemcpy(newLon, dev_lon, size * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(newLat, dev_lat, size * sizeof(double), hipMemcpyDeviceToHost);
size_t countCoordsForCopy = i != countIter - 1 ? size :
width * height - countRowsPerIter * width * i;
for (int j = 0; j < countCoordsForCopy; j++)
{
lon[i * size + j] = newLon[j];
lat[i * size + j] = newLat[j];
}
}
GPU_TIMER_STOP(time);
hipFree(dev_lon);
hipFree(dev_lat);
delete[] newLon;
delete[] newLat;
return (double)time;
}
__global__ void applyTransformWgsToUtmCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, double* x, double* y)
{
int h = blockDim.x * blockIdx.x + threadIdx.x;
int w = blockDim.y * blockIdx.y + threadIdx.y;
if (height <= h || width <= w)
{
return;
}
double lon = xOrigin + xPixelSize * w;
double lat = yOrigin + yPixelSize * h;
double newX = 0.0;
double newY = 0.0;
LatLonToUtmXYGpu(lon, lat, zone, newX, newY);
x[h * width + w] = newX;
y[h * width + w] = newY;
}
double winGpu::doTransformWgsToUtmCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, double* x, double* y)
{
const size_t maxAvaliableCoords = 2000000;
int countRowsPerIter = maxAvaliableCoords / width;
int countIter = height / countRowsPerIter + 1;
const size_t size = width * countRowsPerIter;
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(countRowsPerIter / threadsPerBlock.x + 1, width / threadsPerBlock.y + 1);
double* newX = new double[size];
double* newY = new double[size];
double* dev_x = 0;
double* dev_y = 0;
float time;
hipSetDevice(0);
hipMalloc((void**)&dev_x, size * sizeof(double));
hipMalloc((void**)&dev_y, size * sizeof(double));
GPU_TIMER_START;
for (int i = 0; i < countIter; i++)
{
double newYOrigin = yOrigin + i * yPixelSize * countRowsPerIter;
applyTransformWgsToUtmCoordsGpu << <numBlocks, threadsPerBlock >> > (xOrigin, newYOrigin,
xPixelSize, yPixelSize, countRowsPerIter, width, zone, dev_x, dev_y);
hipDeviceSynchronize();
hipMemcpy(newX, dev_x, size * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(newY, dev_y, size * sizeof(double), hipMemcpyDeviceToHost);
size_t countCoordsForCopy = i != countIter - 1 ? size :
width * height - countRowsPerIter * width * i;
for (int j = 0; j < countCoordsForCopy; j++)
{
x[i * size + j] = newX[j];
y[i * size + j] = newY[j];
}
}
GPU_TIMER_STOP(time);
hipFree(dev_x);
hipFree(dev_y);
delete[] newX;
delete[] newY;
return (double)time;
}
| 7b5c94858b6108448e90d6187581f6666243a4af.cu | #include "GpuProjectionProcessing.cuh"
#include "GpuUtmWgsTransform.cuh"
#include "GpuTimer.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
using namespace winGpu;
__global__ void applyTransformUtmToWgsCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, bool southhemi, double* lon, double* lat)
{
int h = blockDim.x * blockIdx.x + threadIdx.x;
int w = blockDim.y * blockIdx.y + threadIdx.y;
if (height <= h || width <= w)
{
return;
}
double x = xOrigin + xPixelSize * w;
double y = yOrigin + yPixelSize * h;
double newLon = 0.0;
double newLat = 0.0;
UtmXYToLatLonGpu(x, y, zone, southhemi, newLon, newLat);
lon[h * width + w] = newLon;
lat[h * width + w] = newLat;
}
double winGpu::doTransformUtmToWgsCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, bool southhemi, double* lon, double* lat)
{
const size_t maxAvaliableCoords = 1500000;
int countRowsPerIter = maxAvaliableCoords / width;
int countIter = height / countRowsPerIter + 1;
const size_t size = width * countRowsPerIter;
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(countRowsPerIter / threadsPerBlock.x + 1, width / threadsPerBlock.y + 1);
double* newLon = new double[size];
double* newLat = new double[size];
double* dev_lon = 0;
double* dev_lat = 0;
float time;
cudaSetDevice(0);
cudaMalloc((void**)&dev_lon, size * sizeof(double));
cudaMalloc((void**)&dev_lat, size * sizeof(double));
GPU_TIMER_START;
for (int i = 0; i < countIter; i++)
{
double newYOrigin = yOrigin + i * yPixelSize * countRowsPerIter;
applyTransformUtmToWgsCoordsGpu << <numBlocks, threadsPerBlock >> > (xOrigin, newYOrigin,
xPixelSize, yPixelSize, countRowsPerIter, width, zone, southhemi, dev_lon, dev_lat);
cudaDeviceSynchronize();
cudaMemcpy(newLon, dev_lon, size * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(newLat, dev_lat, size * sizeof(double), cudaMemcpyDeviceToHost);
size_t countCoordsForCopy = i != countIter - 1 ? size :
width * height - countRowsPerIter * width * i;
for (int j = 0; j < countCoordsForCopy; j++)
{
lon[i * size + j] = newLon[j];
lat[i * size + j] = newLat[j];
}
}
GPU_TIMER_STOP(time);
cudaFree(dev_lon);
cudaFree(dev_lat);
delete[] newLon;
delete[] newLat;
return (double)time;
}
__global__ void applyTransformWgsToUtmCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, double* x, double* y)
{
int h = blockDim.x * blockIdx.x + threadIdx.x;
int w = blockDim.y * blockIdx.y + threadIdx.y;
if (height <= h || width <= w)
{
return;
}
double lon = xOrigin + xPixelSize * w;
double lat = yOrigin + yPixelSize * h;
double newX = 0.0;
double newY = 0.0;
LatLonToUtmXYGpu(lon, lat, zone, newX, newY);
x[h * width + w] = newX;
y[h * width + w] = newY;
}
double winGpu::doTransformWgsToUtmCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, double* x, double* y)
{
const size_t maxAvaliableCoords = 2000000;
int countRowsPerIter = maxAvaliableCoords / width;
int countIter = height / countRowsPerIter + 1;
const size_t size = width * countRowsPerIter;
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(countRowsPerIter / threadsPerBlock.x + 1, width / threadsPerBlock.y + 1);
double* newX = new double[size];
double* newY = new double[size];
double* dev_x = 0;
double* dev_y = 0;
float time;
cudaSetDevice(0);
cudaMalloc((void**)&dev_x, size * sizeof(double));
cudaMalloc((void**)&dev_y, size * sizeof(double));
GPU_TIMER_START;
for (int i = 0; i < countIter; i++)
{
double newYOrigin = yOrigin + i * yPixelSize * countRowsPerIter;
applyTransformWgsToUtmCoordsGpu << <numBlocks, threadsPerBlock >> > (xOrigin, newYOrigin,
xPixelSize, yPixelSize, countRowsPerIter, width, zone, dev_x, dev_y);
cudaDeviceSynchronize();
cudaMemcpy(newX, dev_x, size * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(newY, dev_y, size * sizeof(double), cudaMemcpyDeviceToHost);
size_t countCoordsForCopy = i != countIter - 1 ? size :
width * height - countRowsPerIter * width * i;
for (int j = 0; j < countCoordsForCopy; j++)
{
x[i * size + j] = newX[j];
y[i * size + j] = newY[j];
}
}
GPU_TIMER_STOP(time);
cudaFree(dev_x);
cudaFree(dev_y);
delete[] newX;
delete[] newY;
return (double)time;
}
|
908985a5d0a50e7a6e19c4ccb3bc152d9c55c7e1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void empty() {}
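// Measures raw kernel-launch overhead: times a single launch of an empty kernel
// with start/stop events and prints the elapsed time in milliseconds.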
int main() {
float ms;
hipEvent_t start, end; //make event vars
hipEventCreate(&start); //create start event
hipEventCreate(&end); //create end event
hipEventRecord(start, 0); //start recording
hipLaunchKernelGGL(( empty), dim3(1),dim3(1) , 0, 0, );
hipEventRecord(end, 0); //end recording
hipEventSynchronize(end); //sync, you have to do this!
hipEventElapsedTime(&ms, start, end); //get elapsed time, put in timing var
    printf("GPU = %.20lf\n", ms);  //print timing
    return 0;
}
| 908985a5d0a50e7a6e19c4ccb3bc152d9c55c7e1.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void empty() {}
int main() {
float ms;
cudaEvent_t start, end; //make event vars
cudaEventCreate(&start); //create start event
cudaEventCreate(&end); //create end event
cudaEventRecord(start, 0); //start recording
empty<<< 1,1 >>> ();
cudaEventRecord(end, 0); //end recording
cudaEventSynchronize(end); //sync, you have to do this!
cudaEventElapsedTime(&ms, start, end); //get elapsed time, put in timing var
    printf("GPU = %.20lf\n", ms);  //print timing
    return 0;
}
|
f570b8571b75d41024566ba5fe852c7d8e65c29a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//Cuda checks
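// Applies an order x order filter to the 8-bit image `temp` and writes the result to
// `matrix`: out-of-range samples are clamped to the nearest border pixel and the
// accumulated sum is clamped to [0, 255] before being stored.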
__global__ void matrix_multiply_kernel(unsigned char *temp, unsigned char *matrix, float *kernal, int order, int middle, int windowSizeX, int windowSizeY){
//Find place in the execution
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0;
//If out of bounds, do nothing
if(y >= windowSizeY || x >= windowSizeX){
return;
}
//Else do function
for(int y2 = 0; y2 < order; y2++){
for(int x2 = 0; x2 < order; x2++){
int tempX = x - middle + x2, tempY = y - middle + y2;
if(tempX < 0){
tempX = 0;
}else if(tempX >= windowSizeX){
tempX = windowSizeX - 1;
}
if(tempY < 0){
tempY = 0;
}else if(tempY >= windowSizeY){
tempY = windowSizeY - 1;
}
sum += temp[(windowSizeX * tempY) + tempX] * kernal[(order * x2) + y2];
}
}
//Clamp the sum value
if(sum < 0){
sum = 0;
}else if(sum > 255){
sum = 255;
}
//Add sum value to matrix
matrix[(windowSizeX * y) + x] = (unsigned char) sum;
} | f570b8571b75d41024566ba5fe852c7d8e65c29a.cu | #include "includes.h"
//Cuda checks
__global__ void matrix_multiply_kernel(unsigned char *temp, unsigned char *matrix, float *kernal, int order, int middle, int windowSizeX, int windowSizeY){
//Find place in the execution
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0;
//If out of bounds, do nothing
if(y >= windowSizeY || x >= windowSizeX){
return;
}
//Else do function
for(int y2 = 0; y2 < order; y2++){
for(int x2 = 0; x2 < order; x2++){
int tempX = x - middle + x2, tempY = y - middle + y2;
if(tempX < 0){
tempX = 0;
}else if(tempX >= windowSizeX){
tempX = windowSizeX - 1;
}
if(tempY < 0){
tempY = 0;
}else if(tempY >= windowSizeY){
tempY = windowSizeY - 1;
}
sum += temp[(windowSizeX * tempY) + tempX] * kernal[(order * x2) + y2];
}
}
//Clamp the sum value
if(sum < 0){
sum = 0;
}else if(sum > 255){
sum = 255;
}
//Add sum value to matrix
matrix[(windowSizeX * y) + x] = (unsigned char) sum;
} |
7b524292e483cff1b71424f03e4af500f3b09477.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
float multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ float buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
for (int j=7;j>=-2;j--){
float level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
float x2=xyz2[i*m*3+l0*3+l*3+0];
float y2=xyz2[i*m*3+l0*3+l*3+1];
float z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
float w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
/*for (int k=threadIdx.x;k<n;k+=gridDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=1e-9f;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l];
suml+=w;
}
ratioL[k]=remainL[k]/suml;
}*/
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
float x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
float sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
float x1=buf[k*4+0];
float y1=buf[k*4+1];
float z1=buf[k*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
/*for (int l=threadIdx.x;l<m;l+=blockDim.x){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float sumr=0;
for (int k=0;k<n;k++){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k];
sumr+=w;
}
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}*/
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
float rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
/*for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=0;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l];
match[i*n*m+l*n+k]+=w;
suml+=w;
}
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}*/
__syncthreads();
}
}
}
void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp){
hipLaunchKernelGGL(( approxmatch), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,temp);
}
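// matchcost reduces match[i,l,k] * ||xyz1_k - xyz2_l|| over all pairs of one batch
// element, using a shared-memory tree reduction across the 512 threads of the block.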
__global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
__shared__ float allsum[512];
const int Block=1024;
__shared__ float buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float subsum=0;
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend*3;l+=blockDim.x)
buf[l]=xyz2[i*m*3+l0*3+l];
__syncthreads();
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*3+0];
float y2=buf[l*3+1];
float z2=buf[l*3+2];
float d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
subsum+=d*match[i*n*m+(l0+l)*n+k];
}
}
__syncthreads();
}
}
allsum[threadIdx.x]=subsum;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
hipLaunchKernelGGL(( matchcost), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,out);
}
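// Gradients of the matched cost: matchcostgrad1 accumulates d(cost)/d(xyz1) point by
// point, while matchcostgrad2 reduces the per-thread partial sums for d(cost)/d(xyz2)
// in shared memory before writing each output point.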
__global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
__shared__ float sum_grad[256*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
float x2=xyz2[(i*m+k)*3+0];
float y2=xyz2[(i*m+k)*3+1];
float z2=xyz2[(i*m+k)*3+2];
float subsumx=0,subsumy=0,subsumz=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=x2-xyz1[(i*n+j)*3+0];
float y1=y2-xyz1[(i*n+j)*3+1];
float z1=z2-xyz1[(i*n+j)*3+2];
float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
}
sum_grad[threadIdx.x*3+0]=subsumx;
sum_grad[threadIdx.x*3+1]=subsumy;
sum_grad[threadIdx.x*3+2]=subsumz;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*3+0]+=sum_grad[j2*3+0];
sum_grad[j1*3+1]+=sum_grad[j2*3+1];
sum_grad[j1*3+2]+=sum_grad[j2*3+2];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*3+0]=sum_grad[0];
grad2[(i*m+k)*3+1]=sum_grad[1];
grad2[(i*m+k)*3+2]=sum_grad[2];
}
__syncthreads();
}
}
}
__global__ void matchcostgrad1(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
float x1=xyz1[i*n*3+l*3+0];
float y1=xyz1[i*n*3+l*3+1];
float z1=xyz1[i*n*3+l*3+2];
float dx=0,dy=0,dz=0;
for (int k=0;k<m;k++){
float x2=xyz2[i*m*3+k*3+0];
float y2=xyz2[i*m*3+k*3+1];
float z2=xyz2[i*m*3+k*3+2];
float d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
}
grad1[i*n*3+l*3+0]=dx;
grad1[i*n*3+l*3+1]=dy;
grad1[i*n*3+l*3+2]=dz;
}
}
}
void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2){
hipLaunchKernelGGL(( matchcostgrad1), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,grad1);
hipLaunchKernelGGL(( matchcostgrad2), dim3(dim3(32,32)),dim3(256), 0, 0, b,n,m,xyz1,xyz2,match,grad2);
}
| 7b524292e483cff1b71424f03e4af500f3b09477.cu | __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
float multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ float buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
for (int j=7;j>=-2;j--){
float level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
float x2=xyz2[i*m*3+l0*3+l*3+0];
float y2=xyz2[i*m*3+l0*3+l*3+1];
float z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
float w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
/*for (int k=threadIdx.x;k<n;k+=gridDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=1e-9f;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l];
suml+=w;
}
ratioL[k]=remainL[k]/suml;
}*/
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
float x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
float sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
float x1=buf[k*4+0];
float y1=buf[k*4+1];
float z1=buf[k*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
/*for (int l=threadIdx.x;l<m;l+=blockDim.x){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float sumr=0;
for (int k=0;k<n;k++){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k];
sumr+=w;
}
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}*/
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
float rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
/*for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=0;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l];
match[i*n*m+l*n+k]+=w;
suml+=w;
}
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}*/
__syncthreads();
}
}
}
void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp){
approxmatch<<<32,512>>>(b,n,m,xyz1,xyz2,match,temp);
}
__global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
__shared__ float allsum[512];
const int Block=1024;
__shared__ float buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float subsum=0;
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend*3;l+=blockDim.x)
buf[l]=xyz2[i*m*3+l0*3+l];
__syncthreads();
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*3+0];
float y2=buf[l*3+1];
float z2=buf[l*3+2];
float d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
subsum+=d*match[i*n*m+(l0+l)*n+k];
}
}
__syncthreads();
}
}
allsum[threadIdx.x]=subsum;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
}
__global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
__shared__ float sum_grad[256*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
float x2=xyz2[(i*m+k)*3+0];
float y2=xyz2[(i*m+k)*3+1];
float z2=xyz2[(i*m+k)*3+2];
float subsumx=0,subsumy=0,subsumz=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=x2-xyz1[(i*n+j)*3+0];
float y1=y2-xyz1[(i*n+j)*3+1];
float z1=z2-xyz1[(i*n+j)*3+2];
float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
}
sum_grad[threadIdx.x*3+0]=subsumx;
sum_grad[threadIdx.x*3+1]=subsumy;
sum_grad[threadIdx.x*3+2]=subsumz;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*3+0]+=sum_grad[j2*3+0];
sum_grad[j1*3+1]+=sum_grad[j2*3+1];
sum_grad[j1*3+2]+=sum_grad[j2*3+2];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*3+0]=sum_grad[0];
grad2[(i*m+k)*3+1]=sum_grad[1];
grad2[(i*m+k)*3+2]=sum_grad[2];
}
__syncthreads();
}
}
}
__global__ void matchcostgrad1(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
float x1=xyz1[i*n*3+l*3+0];
float y1=xyz1[i*n*3+l*3+1];
float z1=xyz1[i*n*3+l*3+2];
float dx=0,dy=0,dz=0;
for (int k=0;k<m;k++){
float x2=xyz2[i*m*3+k*3+0];
float y2=xyz2[i*m*3+k*3+1];
float z2=xyz2[i*m*3+k*3+2];
float d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
}
grad1[i*n*3+l*3+0]=dx;
grad1[i*n*3+l*3+1]=dy;
grad1[i*n*3+l*3+2]=dz;
}
}
}
void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2){
matchcostgrad1<<<32,512>>>(b,n,m,xyz1,xyz2,match,grad1);
matchcostgrad2<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
}
|
4e772e1e83bbc0874a13fdac0b1f47839d174f92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020 ETH Zurich. All Rights Reserved.
#include "dump_particles.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/pvs/rod_vector.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/path.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/xdmf/type_map.h>
namespace mirheo
{
namespace dump_particles_kernels
{
template <typename T>
__global__ void copyObjectDataToParticles(int objSize, int nObjects, const T *srcObjData, T *dstParticleData)
{
const int pid = threadIdx.x + blockIdx.x * blockDim.x;
const int objId = pid / objSize;
if (objId >= nObjects) return;
dstParticleData[pid] = srcObjData[objId];
}
template <typename T>
__global__ void copyRodDataToParticles(int numBiSegmentsPerObject, int objSize, int nObjects, const T *rodData, T *particleData)
{
constexpr int stride = 5;
const int pid = threadIdx.x + blockIdx.x * blockDim.x;
const int objId = pid / objSize;
const int localPartId = pid % objSize;
const int localBisegId = math::min(localPartId / stride, numBiSegmentsPerObject); // min because of last particle
const int bid = objId * numBiSegmentsPerObject + localBisegId;
if (objId < nObjects)
particleData[pid] = rodData[bid];
}
} // namespace dump_particles_kernels
ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, std::string name, std::string pvName, int dumpEvery,
const std::vector<std::string>& channelNames) :
SimulationPlugin(state, name),
pvName_(pvName),
dumpEvery_(dumpEvery),
channelNames_(channelNames)
{
channelData_.resize(channelNames_.size());
}
ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, Loader& loader, const ConfigObject& config) :
ParticleSenderPlugin(state, config["name"], config["pvName"], config["dumpEvery"],
loader.load<std::vector<std::string>>(config["channelNames"]))
{}
ParticleSenderPlugin::~ParticleSenderPlugin() = default;
void ParticleSenderPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
info("Plugin %s initialized for the following particle vector: %s", getCName(), pvName_.c_str());
}
void ParticleSenderPlugin::handshake()
{
std::vector<XDMF::Channel::DataForm> dataForms;
std::vector<XDMF::Channel::NumberType> numberTypes;
std::vector<std::string> typeDescriptorsStr;
auto pushChannelInfos = [&dataForms, &numberTypes, &typeDescriptorsStr](const DataManager::ChannelDescription& desc)
{
mpark::visit([&dataForms, &numberTypes, &typeDescriptorsStr](auto pinnedBufferPtr)
{
using T = typename std::remove_pointer<decltype(pinnedBufferPtr)>::type::value_type;
dataForms .push_back(XDMF::getDataForm <T>());
numberTypes .push_back(XDMF::getNumberType<T>());
typeDescriptorsStr.push_back(typeDescriptorToString(DataTypeWrapper<T>{}));
}, desc.varDataPtr);
};
auto ov = dynamic_cast<ObjectVector*>(pv_);
auto rv = dynamic_cast<RodVector*>(pv_);
for (const auto& name : channelNames_)
{
if (pv_->local()->dataPerParticle.checkChannelExists(name))
{
const auto& desc = pv_->local()->dataPerParticle.getChannelDescOrDie(name);
pushChannelInfos(desc);
}
else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name))
{
const auto& desc = ov->local()->dataPerObject.getChannelDescOrDie(name);
pushChannelInfos(desc);
}
else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name))
{
const auto& desc = rv->local()->dataPerBisegment.getChannelDescOrDie(name);
pushChannelInfos(desc);
}
else
{
die("Channel not found: '%s' in particle vector '%s'",
getCName(), pv_->getCName());
}
}
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, channelNames_, dataForms, numberTypes, typeDescriptorsStr);
_send(sendBuffer_);
}
static inline void copyData(ParticleVector *pv, const std::string& channelName, HostBuffer<char>& dst, hipStream_t stream)
{
auto srcContainer = pv->local()->dataPerParticle.getGenericData(channelName);
dst.genericCopy(srcContainer, stream);
}
static inline void copyData(ObjectVector *ov, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, hipStream_t stream)
{
auto lov = ov->local();
const auto& srcDesc = lov->dataPerObject.getChannelDescOrDie(channelName);
const int objSize = lov->getObjectSize();
const int nObjects = lov->getNumObjects();
mpark::visit([&](auto srcBufferPtr)
{
using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type;
constexpr int nthreads = 128;
const int nParts = objSize * nObjects;
const int nblocks = getNblocks(nParts, nthreads);
workSpace.resize_anew(nParts * sizeof(T));
SAFE_KERNEL_LAUNCH(
dump_particles_kernels::copyObjectDataToParticles,
nblocks, nthreads, 0, stream,
objSize, nObjects, srcBufferPtr->devPtr(),
reinterpret_cast<T*>(workSpace.devPtr()));
}, srcDesc.varDataPtr);
dst.genericCopy(&workSpace, stream);
}
static inline void copyData(RodVector *rv, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, hipStream_t stream)
{
auto lrv = rv->local();
const auto& srcDesc = lrv->dataPerBisegment.getChannelDescOrDie(channelName);
const int objSize = lrv->getObjectSize();
const int nObjects = lrv->getNumObjects();
const int numBiSegmentsPerObject = lrv->getNumSegmentsPerRod() - 1;
mpark::visit([&](auto srcBufferPtr)
{
using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type;
constexpr int nthreads = 128;
const int nParts = objSize * nObjects;
const int nblocks = getNblocks(nParts, nthreads);
workSpace.resize_anew(nParts * sizeof(T));
SAFE_KERNEL_LAUNCH(
dump_particles_kernels::copyRodDataToParticles,
nblocks, nthreads, 0, stream,
numBiSegmentsPerObject, objSize, nObjects, srcBufferPtr->devPtr(),
reinterpret_cast<T*>(workSpace.devPtr()));
}, srcDesc.varDataPtr);
dst.genericCopy(&workSpace, stream);
}
void ParticleSenderPlugin::beforeForces(hipStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
positions_ .genericCopy(&pv_->local()->positions() , stream);
velocities_.genericCopy(&pv_->local()->velocities(), stream);
auto ov = dynamic_cast<ObjectVector*>(pv_);
auto rv = dynamic_cast<RodVector*>(pv_);
for (size_t i = 0; i < channelNames_.size(); ++i)
{
auto name = channelNames_[i];
if (pv_->local()->dataPerParticle.checkChannelExists(name))
{
copyData(pv_, name, channelData_[i], stream);
}
else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name))
{
copyData(ov, name, channelData_[i], workSpace_, stream);
}
else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name))
{
copyData(rv, name, channelData_[i], workSpace_, stream);
}
else
{
die("Channel not found: '%s' in particle vector '%s'",
getCName(), pv_->getCName());
}
}
}
void ParticleSenderPlugin::serializeAndSend(__UNUSED hipStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
debug2("Plugin %s is sending now data", getCName());
for (auto& p : positions_)
{
auto r = getState()->domain.local2global(make_real3(p));
p.x = r.x; p.y = r.y; p.z = r.z;
}
const MirState::StepType timeStamp = getTimeStamp(getState(), dumpEvery_);
debug2("Plugin %s is packing now data consisting of %zu particles",
getCName(), positions_.size());
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, timeStamp, getState()->currentTime, positions_, velocities_, channelData_);
_send(sendBuffer_);
}
void ParticleSenderPlugin::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "ParticleSenderPlugin"));
}
ConfigObject ParticleSenderPlugin::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = SimulationPlugin::_saveSnapshot(saver, typeName);
config.emplace("pvName", saver(pvName_));
config.emplace("dumpEvery", saver(dumpEvery_));
config.emplace("channelNames", saver(channelNames_));
return config;
}
ParticleDumperPlugin::ParticleDumperPlugin(std::string name, std::string path) :
PostprocessPlugin(name),
path_(path),
positions_(std::make_shared<std::vector<real3>>())
{}
ParticleDumperPlugin::ParticleDumperPlugin(Loader&, const ConfigObject& config) :
ParticleDumperPlugin(config["name"], config["path"])
{}
ParticleDumperPlugin::~ParticleDumperPlugin() = default;
void ParticleDumperPlugin::handshake()
{
auto req = waitData();
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
recv();
std::vector<std::string> names;
std::vector<XDMF::Channel::DataForm> dataForms;
std::vector<XDMF::Channel::NumberType> numberTypes;
std::vector<std::string> typeDescriptorsStr;
SimpleSerializer::deserialize(data_, names, dataForms, numberTypes, typeDescriptorsStr);
auto initChannel = [] (const std::string& name, XDMF::Channel::DataForm dataForm,
XDMF::Channel::NumberType numberType, TypeDescriptor datatype,
XDMF::Channel::NeedShift needShift = XDMF::Channel::NeedShift::False)
{
return XDMF::Channel{name, nullptr, dataForm, numberType, datatype, needShift};
};
// Velocity and id are special channels which are always present
std::string allNames = "'velocity', 'id'";
channels_.push_back(initChannel("velocity", XDMF::Channel::DataForm::Vector, XDMF::getNumberType<real>(), DataTypeWrapper<real>()));
channels_.push_back(initChannel("id", XDMF::Channel::DataForm::Scalar, XDMF::Channel::NumberType::Int64, DataTypeWrapper<int64_t>()));
for (size_t i = 0; i < names.size(); ++i)
{
const std::string& name = names[i];
const auto dataForm = dataForms[i];
const auto numberType = numberTypes[i];
const auto dataType = stringToTypeDescriptor(typeDescriptorsStr[i]);
const auto channel = initChannel(name, dataForm, numberType, dataType);
channels_.push_back(channel);
allNames += ", '" + name + "'";
}
// Create the required folder
createFoldersCollective(comm_, getParentPath(path_));
debug2("Plugin '%s' was set up to dump channels %s. Path is %s",
getCName(), allNames.c_str(), path_.c_str());
}
static void unpackParticles(const std::vector<real4> &pos4, const std::vector<real4> &vel4,
std::vector<real3> &pos, std::vector<real3> &vel, std::vector<int64_t> &ids)
{
const size_t n = pos4.size();
pos.resize(n);
vel.resize(n);
ids.resize(n);
for (size_t i = 0; i < n; ++i)
{
auto p = Particle(pos4[i], vel4[i]);
pos[i] = p.r;
vel[i] = p.u;
ids[i] = p.getId();
}
}
void ParticleDumperPlugin::_recvAndUnpack(MirState::TimeType &time, MirState::StepType& timeStamp)
{
int c = 0;
SimpleSerializer::deserialize(data_, timeStamp, time, pos4_, vel4_, channelData_);
unpackParticles(pos4_, vel4_, *positions_, velocities_, ids_);
channels_[c++].data = velocities_.data();
channels_[c++].data = ids_.data();
for (auto& cd : channelData_)
channels_[c++].data = cd.data();
}
void ParticleDumperPlugin::deserialize()
{
debug2("Plugin '%s' will dump right now", getCName());
MirState::TimeType time;
MirState::StepType timeStamp;
_recvAndUnpack(time, timeStamp);
std::string fname = path_ + createStrZeroPadded(timeStamp, zeroPadding_);
XDMF::VertexGrid grid(positions_, comm_);
XDMF::write(fname, &grid, channels_, time, comm_);
}
void ParticleDumperPlugin::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "ParticleDumperPlugin"));
}
ConfigObject ParticleDumperPlugin::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = PostprocessPlugin::_saveSnapshot(saver, typeName);
config.emplace("path", saver(path_));
return config;
}
} // namespace mirheo
| 4e772e1e83bbc0874a13fdac0b1f47839d174f92.cu | // Copyright 2020 ETH Zurich. All Rights Reserved.
#include "dump_particles.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/pvs/rod_vector.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/path.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/xdmf/type_map.h>
namespace mirheo
{
namespace dump_particles_kernels
{
template <typename T>
__global__ void copyObjectDataToParticles(int objSize, int nObjects, const T *srcObjData, T *dstParticleData)
{
const int pid = threadIdx.x + blockIdx.x * blockDim.x;
const int objId = pid / objSize;
if (objId >= nObjects) return;
dstParticleData[pid] = srcObjData[objId];
}
template <typename T>
__global__ void copyRodDataToParticles(int numBiSegmentsPerObject, int objSize, int nObjects, const T *rodData, T *particleData)
{
constexpr int stride = 5;
const int pid = threadIdx.x + blockIdx.x * blockDim.x;
const int objId = pid / objSize;
const int localPartId = pid % objSize;
const int localBisegId = math::min(localPartId / stride, numBiSegmentsPerObject); // min because of last particle
const int bid = objId * numBiSegmentsPerObject + localBisegId;
if (objId < nObjects)
particleData[pid] = rodData[bid];
}
} // namespace dump_particles_kernels
ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, std::string name, std::string pvName, int dumpEvery,
const std::vector<std::string>& channelNames) :
SimulationPlugin(state, name),
pvName_(pvName),
dumpEvery_(dumpEvery),
channelNames_(channelNames)
{
channelData_.resize(channelNames_.size());
}
ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, Loader& loader, const ConfigObject& config) :
ParticleSenderPlugin(state, config["name"], config["pvName"], config["dumpEvery"],
loader.load<std::vector<std::string>>(config["channelNames"]))
{}
ParticleSenderPlugin::~ParticleSenderPlugin() = default;
void ParticleSenderPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
info("Plugin %s initialized for the following particle vector: %s", getCName(), pvName_.c_str());
}
void ParticleSenderPlugin::handshake()
{
std::vector<XDMF::Channel::DataForm> dataForms;
std::vector<XDMF::Channel::NumberType> numberTypes;
std::vector<std::string> typeDescriptorsStr;
auto pushChannelInfos = [&dataForms, &numberTypes, &typeDescriptorsStr](const DataManager::ChannelDescription& desc)
{
mpark::visit([&dataForms, &numberTypes, &typeDescriptorsStr](auto pinnedBufferPtr)
{
using T = typename std::remove_pointer<decltype(pinnedBufferPtr)>::type::value_type;
dataForms .push_back(XDMF::getDataForm <T>());
numberTypes .push_back(XDMF::getNumberType<T>());
typeDescriptorsStr.push_back(typeDescriptorToString(DataTypeWrapper<T>{}));
}, desc.varDataPtr);
};
auto ov = dynamic_cast<ObjectVector*>(pv_);
auto rv = dynamic_cast<RodVector*>(pv_);
for (const auto& name : channelNames_)
{
if (pv_->local()->dataPerParticle.checkChannelExists(name))
{
const auto& desc = pv_->local()->dataPerParticle.getChannelDescOrDie(name);
pushChannelInfos(desc);
}
else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name))
{
const auto& desc = ov->local()->dataPerObject.getChannelDescOrDie(name);
pushChannelInfos(desc);
}
else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name))
{
const auto& desc = rv->local()->dataPerBisegment.getChannelDescOrDie(name);
pushChannelInfos(desc);
}
else
{
die("Channel not found: '%s' in particle vector '%s'",
getCName(), pv_->getCName());
}
}
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, channelNames_, dataForms, numberTypes, typeDescriptorsStr);
_send(sendBuffer_);
}
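// Per-particle channels can be copied to the host directly; object- and bisegment-wise
// channels are first expanded to per-particle values on the device with the kernels above.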
static inline void copyData(ParticleVector *pv, const std::string& channelName, HostBuffer<char>& dst, cudaStream_t stream)
{
auto srcContainer = pv->local()->dataPerParticle.getGenericData(channelName);
dst.genericCopy(srcContainer, stream);
}
static inline void copyData(ObjectVector *ov, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, cudaStream_t stream)
{
auto lov = ov->local();
const auto& srcDesc = lov->dataPerObject.getChannelDescOrDie(channelName);
const int objSize = lov->getObjectSize();
const int nObjects = lov->getNumObjects();
mpark::visit([&](auto srcBufferPtr)
{
using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type;
constexpr int nthreads = 128;
const int nParts = objSize * nObjects;
const int nblocks = getNblocks(nParts, nthreads);
workSpace.resize_anew(nParts * sizeof(T));
SAFE_KERNEL_LAUNCH(
dump_particles_kernels::copyObjectDataToParticles,
nblocks, nthreads, 0, stream,
objSize, nObjects, srcBufferPtr->devPtr(),
reinterpret_cast<T*>(workSpace.devPtr()));
}, srcDesc.varDataPtr);
dst.genericCopy(&workSpace, stream);
}
static inline void copyData(RodVector *rv, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, cudaStream_t stream)
{
auto lrv = rv->local();
const auto& srcDesc = lrv->dataPerBisegment.getChannelDescOrDie(channelName);
const int objSize = lrv->getObjectSize();
const int nObjects = lrv->getNumObjects();
const int numBiSegmentsPerObject = lrv->getNumSegmentsPerRod() - 1;
mpark::visit([&](auto srcBufferPtr)
{
using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type;
constexpr int nthreads = 128;
const int nParts = objSize * nObjects;
const int nblocks = getNblocks(nParts, nthreads);
workSpace.resize_anew(nParts * sizeof(T));
SAFE_KERNEL_LAUNCH(
dump_particles_kernels::copyRodDataToParticles,
nblocks, nthreads, 0, stream,
numBiSegmentsPerObject, objSize, nObjects, srcBufferPtr->devPtr(),
reinterpret_cast<T*>(workSpace.devPtr()));
}, srcDesc.varDataPtr);
dst.genericCopy(&workSpace, stream);
}
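// Every dumpEvery-th step the positions, velocities and requested channels are staged into
// host buffers here; serializeAndSend() then shifts positions to global coordinates and
// sends the packet to the postprocess side.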
void ParticleSenderPlugin::beforeForces(cudaStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
positions_ .genericCopy(&pv_->local()->positions() , stream);
velocities_.genericCopy(&pv_->local()->velocities(), stream);
auto ov = dynamic_cast<ObjectVector*>(pv_);
auto rv = dynamic_cast<RodVector*>(pv_);
for (size_t i = 0; i < channelNames_.size(); ++i)
{
auto name = channelNames_[i];
if (pv_->local()->dataPerParticle.checkChannelExists(name))
{
copyData(pv_, name, channelData_[i], stream);
}
else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name))
{
copyData(ov, name, channelData_[i], workSpace_, stream);
}
else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name))
{
copyData(rv, name, channelData_[i], workSpace_, stream);
}
else
{
die("Channel not found: '%s' in particle vector '%s'",
getCName(), pv_->getCName());
}
}
}
void ParticleSenderPlugin::serializeAndSend(__UNUSED cudaStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
debug2("Plugin %s is sending now data", getCName());
for (auto& p : positions_)
{
auto r = getState()->domain.local2global(make_real3(p));
p.x = r.x; p.y = r.y; p.z = r.z;
}
const MirState::StepType timeStamp = getTimeStamp(getState(), dumpEvery_);
debug2("Plugin %s is packing now data consisting of %zu particles",
getCName(), positions_.size());
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, timeStamp, getState()->currentTime, positions_, velocities_, channelData_);
_send(sendBuffer_);
}
void ParticleSenderPlugin::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "ParticleSenderPlugin"));
}
ConfigObject ParticleSenderPlugin::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = SimulationPlugin::_saveSnapshot(saver, typeName);
config.emplace("pvName", saver(pvName_));
config.emplace("dumpEvery", saver(dumpEvery_));
config.emplace("channelNames", saver(channelNames_));
return config;
}
ParticleDumperPlugin::ParticleDumperPlugin(std::string name, std::string path) :
PostprocessPlugin(name),
path_(path),
positions_(std::make_shared<std::vector<real3>>())
{}
ParticleDumperPlugin::ParticleDumperPlugin(Loader&, const ConfigObject& config) :
ParticleDumperPlugin(config["name"], config["path"])
{}
ParticleDumperPlugin::~ParticleDumperPlugin() = default;
void ParticleDumperPlugin::handshake()
{
auto req = waitData();
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
recv();
std::vector<std::string> names;
std::vector<XDMF::Channel::DataForm> dataForms;
std::vector<XDMF::Channel::NumberType> numberTypes;
std::vector<std::string> typeDescriptorsStr;
SimpleSerializer::deserialize(data_, names, dataForms, numberTypes, typeDescriptorsStr);
auto initChannel = [] (const std::string& name, XDMF::Channel::DataForm dataForm,
XDMF::Channel::NumberType numberType, TypeDescriptor datatype,
XDMF::Channel::NeedShift needShift = XDMF::Channel::NeedShift::False)
{
return XDMF::Channel{name, nullptr, dataForm, numberType, datatype, needShift};
};
// Velocity and id are special channels which are always present
std::string allNames = "'velocity', 'id'";
channels_.push_back(initChannel("velocity", XDMF::Channel::DataForm::Vector, XDMF::getNumberType<real>(), DataTypeWrapper<real>()));
channels_.push_back(initChannel("id", XDMF::Channel::DataForm::Scalar, XDMF::Channel::NumberType::Int64, DataTypeWrapper<int64_t>()));
for (size_t i = 0; i < names.size(); ++i)
{
const std::string& name = names[i];
const auto dataForm = dataForms[i];
const auto numberType = numberTypes[i];
const auto dataType = stringToTypeDescriptor(typeDescriptorsStr[i]);
const auto channel = initChannel(name, dataForm, numberType, dataType);
channels_.push_back(channel);
allNames += ", '" + name + "'";
}
// Create the required folder
createFoldersCollective(comm_, getParentPath(path_));
debug2("Plugin '%s' was set up to dump channels %s. Path is %s",
getCName(), allNames.c_str(), path_.c_str());
}
static void unpackParticles(const std::vector<real4> &pos4, const std::vector<real4> &vel4,
std::vector<real3> &pos, std::vector<real3> &vel, std::vector<int64_t> &ids)
{
const size_t n = pos4.size();
pos.resize(n);
vel.resize(n);
ids.resize(n);
for (size_t i = 0; i < n; ++i)
{
auto p = Particle(pos4[i], vel4[i]);
pos[i] = p.r;
vel[i] = p.u;
ids[i] = p.getId();
}
}
void ParticleDumperPlugin::_recvAndUnpack(MirState::TimeType &time, MirState::StepType& timeStamp)
{
int c = 0;
SimpleSerializer::deserialize(data_, timeStamp, time, pos4_, vel4_, channelData_);
unpackParticles(pos4_, vel4_, *positions_, velocities_, ids_);
channels_[c++].data = velocities_.data();
channels_[c++].data = ids_.data();
for (auto& cd : channelData_)
channels_[c++].data = cd.data();
}
void ParticleDumperPlugin::deserialize()
{
debug2("Plugin '%s' will dump right now", getCName());
MirState::TimeType time;
MirState::StepType timeStamp;
_recvAndUnpack(time, timeStamp);
std::string fname = path_ + createStrZeroPadded(timeStamp, zeroPadding_);
XDMF::VertexGrid grid(positions_, comm_);
XDMF::write(fname, &grid, channels_, time, comm_);
}
void ParticleDumperPlugin::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "ParticleDumperPlugin"));
}
ConfigObject ParticleDumperPlugin::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = PostprocessPlugin::_saveSnapshot(saver, typeName);
config.emplace("path", saver(path_));
return config;
}
} // namespace mirheo
|
9fbe8e35c1adc1ed052136fac3d0d385040f4761.hip | // !!! This is a file automatically generated by hipify!!!
#include "MWConvLayerImpl.hpp"
#include "MWConvLayer.hpp"
#include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include <cassert>
#include <stdio.h>
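// HIP translation of a cuDNN-based convolution layer: descriptor setup, forward-algorithm
// selection and workspace sizing stay on cuDNN, while memory management (hipMalloc/hipMemcpy/
// hipMemset) and the optional RANDOM path (hiprand) use the HIP runtime. Identifier names
// appear to be machine-generated.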
MWConvLayerImpl::MWConvLayerImpl(MWCNNLayer* layer, MWTargetNetworkImpl*
ntwk_impl, int filt_H, int filt_W, int numGrps, int numChnls, int numFilts, int
FpguQZSermqZCMRiUfML, int FrpxvsDMwwgbpqHXWxmN, int CpMjJjtGOeWOzwxpAAQP, int
ClEhcJFlvGCgiavziIag, int CufLFODQDXTAPyRqYodN, int DRzwhbNPpftRRIXXfHzd,
int ATEikvMQPqBefhJzjzhc, int AuqaQHxmPQSyYRemQvyX, const char*
vpXxoeEhdEosLSsYXkNG, const char* MIBnYCbKBdUrlfqlHdoo, int outbufIdx) :
MWCNNLayerImpl(layer, ntwk_impl) , yPBlKhIGljihkXaXbYpB(NULL) , vIWQzNvYZSuxmOTVDFhU(NULL) ,
KHClOltUSuqFVVErSxVb(NULL) , veFyKKHbdqBIvQLYBqfF(NULL) , URgvgDXnZskIYGdtimcU(NULL) ,
XCLDbxHBtWRStETWIkId(NULL) , aLsOwwcceEmRSYzllBNs(0) ,
AwZQzUhuWVLGrWgLHRuM(filt_H) , AzTsxYcYjIEJsGQbeYHm (filt_W) ,
BuyZFXzwOMxcePIbCLfl (numGrps) , BdqURaHPmdnfzvtUvocl (numChnls) ,
BlRIQPyqJZORKENzSdYf (numFilts) { enPbWLzEmxYCBmzGJutZ = ntwk_impl;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&ONvcEjLBnVNUdjMKOAwF));
CUDNN_CALL(cudnnCreateFilterDescriptor(&SDWKEQTZaTFZByPlzUDR));
CUDNN_CALL(cudnnCreateTensorDescriptor(&MEmIeGILUZNEWEagSzRk));
CUDNN_CALL(cudnnCreateTensorDescriptor(getOutputDescriptor()));
createConvLayer(FpguQZSermqZCMRiUfML, FrpxvsDMwwgbpqHXWxmN, CpMjJjtGOeWOzwxpAAQP,
ClEhcJFlvGCgiavziIag, CufLFODQDXTAPyRqYodN, DRzwhbNPpftRRIXXfHzd,
ATEikvMQPqBefhJzjzhc, AuqaQHxmPQSyYRemQvyX,
vpXxoeEhdEosLSsYXkNG, MIBnYCbKBdUrlfqlHdoo, outbufIdx); } float
MWConvLayerImpl::getIsGrouped() { return aLsOwwcceEmRSYzllBNs; } void
MWConvLayerImpl::setIsGrouped(float ig) { aLsOwwcceEmRSYzllBNs = ig; } void
MWConvLayerImpl::setOutput2(float* out2) { yPBlKhIGljihkXaXbYpB = out2; } float*
MWConvLayerImpl::getOutput2() { return yPBlKhIGljihkXaXbYpB; }
cudnnTensorDescriptor_t* MWConvLayerImpl::getGroupDescriptor() { return
&THfVbcZJtANcLKxEriuV; } void MWConvLayerImpl::createConvLayer(int
FpguQZSermqZCMRiUfML, int FrpxvsDMwwgbpqHXWxmN, int CpMjJjtGOeWOzwxpAAQP, int
ClEhcJFlvGCgiavziIag , int CufLFODQDXTAPyRqYodN, int DRzwhbNPpftRRIXXfHzd,
int ATEikvMQPqBefhJzjzhc, int AuqaQHxmPQSyYRemQvyX, const char*
vpXxoeEhdEosLSsYXkNG, const char* MIBnYCbKBdUrlfqlHdoo, int outbufIdx) {
MWTensor* ipTensor = getLayer()->getInputTensor(0); int
OVOphSOolqRQDDoKPwxy = CpMjJjtGOeWOzwxpAAQP; int
OiVqrkNdXioJhALWMMvm = CufLFODQDXTAPyRqYodN; if
((CpMjJjtGOeWOzwxpAAQP != ClEhcJFlvGCgiavziIag) || (CufLFODQDXTAPyRqYodN !=
DRzwhbNPpftRRIXXfHzd)) { float* newInput; int inputH = ipTensor->getHeight() +
CpMjJjtGOeWOzwxpAAQP + ClEhcJFlvGCgiavziIag; int inputW =
ipTensor->getWidth() + CufLFODQDXTAPyRqYodN + DRzwhbNPpftRRIXXfHzd;
CUDA_CALL(hipMalloc((void**)&newInput, sizeof(float)*ipTensor->getBatchSize()
* ipTensor->getChannels() * inputH * inputW)); CUDA_CALL(hipMemset(newInput,
0,
sizeof(float)*ipTensor->getBatchSize()*ipTensor->getChannels()*inputH*inputW));
URgvgDXnZskIYGdtimcU = new MWTensor(inputH, inputW, ipTensor->getChannels(),
ipTensor->getBatchSize(), newInput,getLayer(), 0);
CUDNN_CALL(cudnnCreateTensorDescriptor(&YgcpEBUCwCLaPhyntIio));
CUDNN_CALL(cudnnSetTensor4dDescriptor(YgcpEBUCwCLaPhyntIio, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, URgvgDXnZskIYGdtimcU->getBatchSize(), URgvgDXnZskIYGdtimcU->getChannels(),
URgvgDXnZskIYGdtimcU->getHeight(), URgvgDXnZskIYGdtimcU->getWidth()));
OVOphSOolqRQDDoKPwxy = 0; OiVqrkNdXioJhALWMMvm = 0; } else {
URgvgDXnZskIYGdtimcU = ipTensor; YgcpEBUCwCLaPhyntIio =
*getCuDNNDescriptor(URgvgDXnZskIYGdtimcU); } bUVPfnrJhLfHzOLUUrKk =
CpMjJjtGOeWOzwxpAAQP; cQBKlCKXxecGPJrXBXdk = CufLFODQDXTAPyRqYodN;
assert(URgvgDXnZskIYGdtimcU != NULL); MWConvLayer* convLayer = static_cast<MWConvLayer*>(getLayer());
#if (CUDNN_MAJOR <= 5)
{ if ((ATEikvMQPqBefhJzjzhc != 1) && (AuqaQHxmPQSyYRemQvyX != 1)){
printf("Dilated Convolution only supported for cuDNN 6 or greater "); throw
std::runtime_error("Unsupported Dilation Factor"); }
CUDNN_CALL(cudnnSetConvolution2dDescriptor(ONvcEjLBnVNUdjMKOAwF,
OVOphSOolqRQDDoKPwxy, OiVqrkNdXioJhALWMMvm, FpguQZSermqZCMRiUfML,
FrpxvsDMwwgbpqHXWxmN, 1, 1, CUDNN_CROSS_CORRELATION)); }
#else
{ CUDNN_CALL(cudnnSetConvolution2dDescriptor(ONvcEjLBnVNUdjMKOAwF,
OVOphSOolqRQDDoKPwxy, OiVqrkNdXioJhALWMMvm, FpguQZSermqZCMRiUfML,
FrpxvsDMwwgbpqHXWxmN, ATEikvMQPqBefhJzjzhc, AuqaQHxmPQSyYRemQvyX,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); }
#endif
#if (FP16_ENABLED == 1 && ( CUDNN_MAJOR > 7 || (CUDNN_MAJOR == 7 && CUDNN_MINOR >= 2) ))
CUDNN_CALL(cudnnSetConvolutionMathType(ONvcEjLBnVNUdjMKOAwF, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION));
#endif
int shEncNmxJsMuJKwbrwok, rxMAtVYGgGtZoKBkJcjc; int numInputFeatures =
BdqURaHPmdnfzvtUvocl*BuyZFXzwOMxcePIbCLfl; int
fxxCPKTclxXPxrdMAkwi,sRECVoNNtDdcBOWgDyar,sxuOMwKXOKfuExclRaSe; MWTensor*
opTensor = convLayer->getOutputTensor(0); fxxCPKTclxXPxrdMAkwi =
opTensor->getChannels(); sRECVoNNtDdcBOWgDyar = opTensor->getHeight();
sxuOMwKXOKfuExclRaSe = opTensor->getWidth(); size_t twppmWSuyDzoZjSbrMHi = 0; if(
BuyZFXzwOMxcePIbCLfl == 1 ) {
CUDNN_CALL(cudnnSetFilter4dDescriptor(SDWKEQTZaTFZByPlzUDR, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, fxxCPKTclxXPxrdMAkwi, numInputFeatures,
AwZQzUhuWVLGrWgLHRuM, AzTsxYcYjIEJsGQbeYHm));
CUDNN_CALL(cudnnSetTensor4dDescriptor(MEmIeGILUZNEWEagSzRk, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, fxxCPKTclxXPxrdMAkwi, 1, 1));
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(ONvcEjLBnVNUdjMKOAwF,
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, &shEncNmxJsMuJKwbrwok,
&rxMAtVYGgGtZoKBkJcjc, &sRECVoNNtDdcBOWgDyar, &sxuOMwKXOKfuExclRaSe));
CUDNN_CALL(cudnnSetTensor4dDescriptor(*getOutputDescriptor(),
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, shEncNmxJsMuJKwbrwok, rxMAtVYGgGtZoKBkJcjc,
opTensor->getHeight(), opTensor->getWidth())); assert(opTensor->getHeight() ==
sRECVoNNtDdcBOWgDyar); assert(opTensor->getWidth() == sxuOMwKXOKfuExclRaSe);
#if (CUDNN_MAJOR < 7)
{
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getOutputDescriptor(), CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0,
&NtWaRGCHLeTapjWdEHHS)); }
#else
{ cudnnConvolutionFwdAlgoPerf_t perf_results[3]; int returnedAlgoCount;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm_v7(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getOutputDescriptor(), 3, &returnedAlgoCount, perf_results));
NtWaRGCHLeTapjWdEHHS = perf_results[0].algo; }
#endif
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getOutputDescriptor(), NtWaRGCHLeTapjWdEHHS, &twppmWSuyDzoZjSbrMHi)); } else {
setIsGrouped(1); MWTensor* ipTensor = URgvgDXnZskIYGdtimcU; XCLDbxHBtWRStETWIkId =
ipTensor->getData() + ipTensor->getChannels()/BuyZFXzwOMxcePIbCLfl *
ipTensor->getHeight() * ipTensor->getWidth();
CUDNN_CALL(cudnnCreateTensorDescriptor(&ZCArwzdUdwQuFQUWjnUE));
CUDNN_CALL(cudnnSetTensor4dDescriptorEx(ZCArwzdUdwQuFQUWjnUE,
CUDNN_DATA_FLOAT, ipTensor->getBatchSize(),
ipTensor->getChannels()/BuyZFXzwOMxcePIbCLfl, ipTensor->getHeight(),
ipTensor->getWidth(),
ipTensor->getChannels()*ipTensor->getHeight()*ipTensor->getWidth(),
ipTensor->getHeight()*ipTensor->getWidth(), ipTensor->getWidth(), 1));
CUDNN_CALL(cudnnCreateTensorDescriptor(getGroupDescriptor()));
CUDNN_CALL(cudnnSetFilter4dDescriptor(SDWKEQTZaTFZByPlzUDR, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, BlRIQPyqJZORKENzSdYf, BdqURaHPmdnfzvtUvocl,
AwZQzUhuWVLGrWgLHRuM, AzTsxYcYjIEJsGQbeYHm));
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(ONvcEjLBnVNUdjMKOAwF,
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, &shEncNmxJsMuJKwbrwok,
&rxMAtVYGgGtZoKBkJcjc, &sRECVoNNtDdcBOWgDyar, &sxuOMwKXOKfuExclRaSe));
assert(opTensor->getHeight() == sRECVoNNtDdcBOWgDyar); assert(opTensor->getWidth()
== sxuOMwKXOKfuExclRaSe);
CUDNN_CALL(cudnnSetTensor4dDescriptorEx(*getGroupDescriptor(),
CUDNN_DATA_FLOAT, shEncNmxJsMuJKwbrwok, rxMAtVYGgGtZoKBkJcjc, sRECVoNNtDdcBOWgDyar,
sxuOMwKXOKfuExclRaSe,
rxMAtVYGgGtZoKBkJcjc*BuyZFXzwOMxcePIbCLfl*sRECVoNNtDdcBOWgDyar*sxuOMwKXOKfuExclRaSe,
sRECVoNNtDdcBOWgDyar*sxuOMwKXOKfuExclRaSe, sxuOMwKXOKfuExclRaSe, 1));
CUDNN_CALL(cudnnSetTensor4dDescriptor(*getOutputDescriptor(),
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, shEncNmxJsMuJKwbrwok,
rxMAtVYGgGtZoKBkJcjc*BuyZFXzwOMxcePIbCLfl, sRECVoNNtDdcBOWgDyar, sxuOMwKXOKfuExclRaSe));
CUDNN_CALL(cudnnSetTensor4dDescriptor(MEmIeGILUZNEWEagSzRk, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, rxMAtVYGgGtZoKBkJcjc*BuyZFXzwOMxcePIbCLfl, 1, 1));
#if (CUDNN_MAJOR < 7)
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getGroupDescriptor(), CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &NtWaRGCHLeTapjWdEHHS));
#else
cudnnConvolutionFwdAlgoPerf_t perf_results[3]; int returnedAlgoCount;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm_v7(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getGroupDescriptor(), 3, &returnedAlgoCount,perf_results));
NtWaRGCHLeTapjWdEHHS = perf_results[0].algo;
#endif
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getGroupDescriptor(), NtWaRGCHLeTapjWdEHHS, &twppmWSuyDzoZjSbrMHi)); } if(
twppmWSuyDzoZjSbrMHi > *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize() ) {
enPbWLzEmxYCBmzGJutZ->setWorkSpaceSize(twppmWSuyDzoZjSbrMHi); }
assert(shEncNmxJsMuJKwbrwok == ipTensor->getBatchSize());
assert(fxxCPKTclxXPxrdMAkwi == rxMAtVYGgGtZoKBkJcjc *
BuyZFXzwOMxcePIbCLfl); if (outbufIdx < 0) {
CUDA_CALL(hipMalloc((void**)&PtkeOkuClHzhOfpmBevf, sizeof(float) *
opTensor->getBatchSize() * opTensor->getChannels() * opTensor->getHeight() *
opTensor->getWidth())); } else {
setData(enPbWLzEmxYCBmzGJutZ->memBuffer[outbufIdx]);
getLayer()->getOutputTensor(0)->setopBufIndex(outbufIdx); }
CUDA_CALL(hipMalloc((void**)&vIWQzNvYZSuxmOTVDFhU,
sizeof(float)*BdqURaHPmdnfzvtUvocl*fxxCPKTclxXPxrdMAkwi*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm));
CUDA_CALL(hipMalloc((void**)&KHClOltUSuqFVVErSxVb, sizeof(float)*fxxCPKTclxXPxrdMAkwi));
#ifdef RANDOM
hiprandGenerateNormal(SUleyRyvAggTFnSdxLru, vIWQzNvYZSuxmOTVDFhU,
BdqURaHPmdnfzvtUvocl*fxxCPKTclxXPxrdMAkwi*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm,
0, 0.1); hiprandGenerateNormal(SUleyRyvAggTFnSdxLru, KHClOltUSuqFVVErSxVb,
fxxCPKTclxXPxrdMAkwi, -0.5, 1);
#endif
if( BuyZFXzwOMxcePIbCLfl == 2 ) { veFyKKHbdqBIvQLYBqfF = vIWQzNvYZSuxmOTVDFhU +
BlRIQPyqJZORKENzSdYf * BdqURaHPmdnfzvtUvocl * AwZQzUhuWVLGrWgLHRuM *
AzTsxYcYjIEJsGQbeYHm; setOutput2(getData() + fxxCPKTclxXPxrdMAkwi/ 2
* sRECVoNNtDdcBOWgDyar * sxuOMwKXOKfuExclRaSe); setIsGrouped(1); }
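// Grouped convolution (numGrps == 2) is handled with a second filter/output pointer offset by
// half the filters/channels; weights and biases are then read from the binary files whose
// paths were passed to the constructor.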
loadWeights(vpXxoeEhdEosLSsYXkNG); loadBias(MIBnYCbKBdUrlfqlHdoo); } void
MWConvLayerImpl::predict() { MWConvLayer* convLayer =
static_cast<MWConvLayer*>(getLayer()); if (URgvgDXnZskIYGdtimcU !=
convLayer->getInputTensor()) { CUDA_CALL(hipMemset(URgvgDXnZskIYGdtimcU->getData(),
0,
sizeof(float)*URgvgDXnZskIYGdtimcU->getBatchSize()*URgvgDXnZskIYGdtimcU->getChannels()*URgvgDXnZskIYGdtimcU->getHeight()*URgvgDXnZskIYGdtimcU->getWidth()));
int fhikqqlnUKCjleVKDqiG =
convLayer->getInputTensor()->getHeight()*convLayer->getInputTensor()->getWidth()*convLayer->getInputTensor()->getBatchSize()*convLayer->getInputTensor()->getChannels();
MWCNNLayerImpl::padInput(convLayer->getInputTensor()->getData(),
convLayer->getInputTensor()->getHeight(),
convLayer->getInputTensor()->getWidth(),
convLayer->getInputTensor()->getChannels(), URgvgDXnZskIYGdtimcU->getHeight(),
URgvgDXnZskIYGdtimcU->getWidth(), bUVPfnrJhLfHzOLUUrKk, cQBKlCKXxecGPJrXBXdk,
URgvgDXnZskIYGdtimcU->getData(), fhikqqlnUKCjleVKDqiG); } if(BuyZFXzwOMxcePIbCLfl == 1
) { assert(getData() != URgvgDXnZskIYGdtimcU->getData());
CUDNN_CALL(cudnnConvolutionForward(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),getOnePtr(),
YgcpEBUCwCLaPhyntIio, URgvgDXnZskIYGdtimcU->getData(), SDWKEQTZaTFZByPlzUDR,
vIWQzNvYZSuxmOTVDFhU, ONvcEjLBnVNUdjMKOAwF, NtWaRGCHLeTapjWdEHHS,
enPbWLzEmxYCBmzGJutZ->getWorkSpace(), *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize(),
getZeroPtr(), *getOutputDescriptor(),getData()));
CUDNN_CALL(cudnnAddTensor(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(), getOnePtr(),
MEmIeGILUZNEWEagSzRk, KHClOltUSuqFVVErSxVb, getOnePtr(),
*getOutputDescriptor(),getData())); } else { assert(getData() !=
URgvgDXnZskIYGdtimcU->getData());
CUDNN_CALL(cudnnConvolutionForward(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
getOnePtr(), ZCArwzdUdwQuFQUWjnUE, URgvgDXnZskIYGdtimcU->getData(),
SDWKEQTZaTFZByPlzUDR, vIWQzNvYZSuxmOTVDFhU, ONvcEjLBnVNUdjMKOAwF, NtWaRGCHLeTapjWdEHHS,
enPbWLzEmxYCBmzGJutZ->getWorkSpace(), *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize(),
getZeroPtr(), *getGroupDescriptor(), getData()));
CUDNN_CALL(cudnnConvolutionForward(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
getOnePtr(), ZCArwzdUdwQuFQUWjnUE, XCLDbxHBtWRStETWIkId, SDWKEQTZaTFZByPlzUDR,
veFyKKHbdqBIvQLYBqfF, ONvcEjLBnVNUdjMKOAwF, NtWaRGCHLeTapjWdEHHS,
enPbWLzEmxYCBmzGJutZ->getWorkSpace(), *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize(),
getZeroPtr(), *getGroupDescriptor(), getOutput2()));
CUDNN_CALL(cudnnAddTensor(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(), getOnePtr(),
MEmIeGILUZNEWEagSzRk, KHClOltUSuqFVVErSxVb, getOnePtr(), *getOutputDescriptor(),
getData())); } } void MWConvLayerImpl::cleanup() {
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(ONvcEjLBnVNUdjMKOAwF));
CUDNN_CALL(cudnnDestroyFilterDescriptor(SDWKEQTZaTFZByPlzUDR)); if
(vIWQzNvYZSuxmOTVDFhU) { CUDA_FREE_CALL(vIWQzNvYZSuxmOTVDFhU); }
CUDNN_CALL(cudnnDestroyTensorDescriptor(MEmIeGILUZNEWEagSzRk)); if
(KHClOltUSuqFVVErSxVb) { CUDA_FREE_CALL(KHClOltUSuqFVVErSxVb); }
CUDNN_CALL(cudnnDestroyTensorDescriptor(*getOutputDescriptor())); if
(URgvgDXnZskIYGdtimcU != getLayer()->getInputTensor(0)) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(YgcpEBUCwCLaPhyntIio));
CUDA_FREE_CALL(URgvgDXnZskIYGdtimcU->getData()); } if (getIsGrouped()) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(ZCArwzdUdwQuFQUWjnUE));
CUDNN_CALL(cudnnDestroyTensorDescriptor(*getGroupDescriptor())); } for(int idx
= 0; idx < getLayer()->getNumOutputs(); idx++) { float* data =
getLayer()->getOutputTensor(idx)->getData(); if (data) {
if(getLayer()->getOutputTensor(idx)->getopBufIndex() < 0) CUDA_FREE_CALL(data);
} } } void MWConvLayerImpl::loadWeights(const char* RVrPByQXdKmunRZHKWJD) {
MWConvLayer* convLayer = static_cast<MWConvLayer*>(getLayer()); FILE*
SUjIWYfjMcdzSZaCSVRT = MWCNNLayer::openBinaryFile(RVrPByQXdKmunRZHKWJD);
assert(SUjIWYfjMcdzSZaCSVRT); assert(BdqURaHPmdnfzvtUvocl ==
URgvgDXnZskIYGdtimcU->getChannels()/BuyZFXzwOMxcePIbCLfl); int eqOmMKQRpqBqRQCnJmxt =
BdqURaHPmdnfzvtUvocl*convLayer->getOutputTensor()->getChannels()*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm;
float* MdSWZSOAjugbWppryHbR = MALLOC_CALL(sizeof(float)*eqOmMKQRpqBqRQCnJmxt);
call_fread(MdSWZSOAjugbWppryHbR, sizeof(float), eqOmMKQRpqBqRQCnJmxt, SUjIWYfjMcdzSZaCSVRT,
RVrPByQXdKmunRZHKWJD); if( AwZQzUhuWVLGrWgLHRuM != 1 &&
AzTsxYcYjIEJsGQbeYHm != 1 ) { float* MgAiRWiTutoTMxKXjmHQ =
MALLOC_CALL(sizeof(float)*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm);
for(int k=0; k<eqOmMKQRpqBqRQCnJmxt/AwZQzUhuWVLGrWgLHRuM/AzTsxYcYjIEJsGQbeYHm;
k++) { for(int i=0; i<AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm; i++)
MgAiRWiTutoTMxKXjmHQ[i]=MdSWZSOAjugbWppryHbR[k*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm+i];
for(int j=0; j<AwZQzUhuWVLGrWgLHRuM; j++) for(int i=0;
i<AzTsxYcYjIEJsGQbeYHm; i++)
MdSWZSOAjugbWppryHbR[k*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm+j*AzTsxYcYjIEJsGQbeYHm+i]=MgAiRWiTutoTMxKXjmHQ[j+i*AwZQzUhuWVLGrWgLHRuM];
} free(MgAiRWiTutoTMxKXjmHQ); } CUDA_CALL(hipMemcpy(vIWQzNvYZSuxmOTVDFhU,
MdSWZSOAjugbWppryHbR, sizeof(float)*eqOmMKQRpqBqRQCnJmxt, hipMemcpyHostToDevice));
#if 0
printf("%s loaded. Size = %d. %f\n", RVrPByQXdKmunRZHKWJD, eqOmMKQRpqBqRQCnJmxt, MdSWZSOAjugbWppryHbR[0]);
#endif
free(MdSWZSOAjugbWppryHbR); fclose(SUjIWYfjMcdzSZaCSVRT); } void
MWConvLayerImpl::loadBias(const char* RVrPByQXdKmunRZHKWJD) { MWConvLayer*
convLayer = static_cast<MWConvLayer*>(getLayer()); FILE* SUjIWYfjMcdzSZaCSVRT =
MWCNNLayer::openBinaryFile(RVrPByQXdKmunRZHKWJD); assert(SUjIWYfjMcdzSZaCSVRT); int
eqOmMKQRpqBqRQCnJmxt = convLayer->getOutputTensor()->getChannels(); float*
MdSWZSOAjugbWppryHbR = MALLOC_CALL(sizeof(float)*eqOmMKQRpqBqRQCnJmxt);
call_fread(MdSWZSOAjugbWppryHbR, sizeof(float), eqOmMKQRpqBqRQCnJmxt, SUjIWYfjMcdzSZaCSVRT,
RVrPByQXdKmunRZHKWJD); CUDA_CALL(hipMemcpy(KHClOltUSuqFVVErSxVb, MdSWZSOAjugbWppryHbR,
sizeof(float)*eqOmMKQRpqBqRQCnJmxt, hipMemcpyHostToDevice));
free(MdSWZSOAjugbWppryHbR); fclose(SUjIWYfjMcdzSZaCSVRT); } void
MWConvLayerImpl::postSetup() { if(enPbWLzEmxYCBmzGJutZ->getAutoTune()) {
getConvAlgoTuned(); } else if(!enPbWLzEmxYCBmzGJutZ->getWorkSpace()) {
getConvAlgoNoWorkSpace(); } cudnnTensorDescriptor_t tmpInDesc = getIsGrouped()
? ZCArwzdUdwQuFQUWjnUE : YgcpEBUCwCLaPhyntIio; cudnnTensorDescriptor_t
hnewnpwgzKmOdualajhn = getIsGrouped() ? *getGroupDescriptor() :
*getOutputDescriptor(); size_t twppmWSuyDzoZjSbrMHi;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
tmpInDesc, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF, hnewnpwgzKmOdualajhn,
NtWaRGCHLeTapjWdEHHS, &twppmWSuyDzoZjSbrMHi)); if( (long int)twppmWSuyDzoZjSbrMHi
> *enPbWLzEmxYCBmzGJutZ->getPostSetupWorkSpaceSize() ) {
enPbWLzEmxYCBmzGJutZ->setPostSetupWorkSpaceSize((long int)twppmWSuyDzoZjSbrMHi);
} } void MWConvLayerImpl::getConvAlgoTuned() { cudnnConvolutionFwdAlgoPerf_t
perf_results[3]; cudnnTensorDescriptor_t tempInDesc = getIsGrouped() ?
ZCArwzdUdwQuFQUWjnUE : YgcpEBUCwCLaPhyntIio; cudnnTensorDescriptor_t
hnewnpwgzKmOdualajhn = getIsGrouped() ? *getGroupDescriptor() :
*getOutputDescriptor(); int returnedAlgoCount;
CUDNN_CALL(cudnnFindConvolutionForwardAlgorithmEx(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
tempInDesc, URgvgDXnZskIYGdtimcU->getData(), SDWKEQTZaTFZByPlzUDR, vIWQzNvYZSuxmOTVDFhU,
ONvcEjLBnVNUdjMKOAwF, hnewnpwgzKmOdualajhn, getData(), 3, &returnedAlgoCount,
&perf_results[0], enPbWLzEmxYCBmzGJutZ->getWorkSpace(),
*enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize())); NtWaRGCHLeTapjWdEHHS =
perf_results[0].algo; } void MWConvLayerImpl::getConvAlgoNoWorkSpace() {
assert(enPbWLzEmxYCBmzGJutZ->getWorkSpace() == 0); cudnnTensorDescriptor_t
tempInDesc = getIsGrouped() ? ZCArwzdUdwQuFQUWjnUE : YgcpEBUCwCLaPhyntIio;
cudnnTensorDescriptor_t hnewnpwgzKmOdualajhn = getIsGrouped() ?
*getGroupDescriptor() : *getOutputDescriptor();
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
tempInDesc, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF, hnewnpwgzKmOdualajhn,
CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, &NtWaRGCHLeTapjWdEHHS)); } | 9fbe8e35c1adc1ed052136fac3d0d385040f4761.cu | #include "MWConvLayerImpl.hpp"
#include "MWConvLayer.hpp"
#include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include <cassert>
#include <stdio.h>
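// Original CUDA version of the convolution layer; aside from cudaMalloc/cudaMemcpy/cudaMemset
// and curandGenerateNormal replacing their HIP counterparts, the listing matches the .hip
// translation above.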
MWConvLayerImpl::MWConvLayerImpl(MWCNNLayer* layer, MWTargetNetworkImpl*
ntwk_impl, int filt_H, int filt_W, int numGrps, int numChnls, int numFilts, int
FpguQZSermqZCMRiUfML, int FrpxvsDMwwgbpqHXWxmN, int CpMjJjtGOeWOzwxpAAQP, int
ClEhcJFlvGCgiavziIag, int CufLFODQDXTAPyRqYodN, int DRzwhbNPpftRRIXXfHzd,
int ATEikvMQPqBefhJzjzhc, int AuqaQHxmPQSyYRemQvyX, const char*
vpXxoeEhdEosLSsYXkNG, const char* MIBnYCbKBdUrlfqlHdoo, int outbufIdx) :
MWCNNLayerImpl(layer, ntwk_impl) , yPBlKhIGljihkXaXbYpB(NULL) , vIWQzNvYZSuxmOTVDFhU(NULL) ,
KHClOltUSuqFVVErSxVb(NULL) , veFyKKHbdqBIvQLYBqfF(NULL) , URgvgDXnZskIYGdtimcU(NULL) ,
XCLDbxHBtWRStETWIkId(NULL) , aLsOwwcceEmRSYzllBNs(0) ,
AwZQzUhuWVLGrWgLHRuM(filt_H) , AzTsxYcYjIEJsGQbeYHm (filt_W) ,
BuyZFXzwOMxcePIbCLfl (numGrps) , BdqURaHPmdnfzvtUvocl (numChnls) ,
BlRIQPyqJZORKENzSdYf (numFilts) { enPbWLzEmxYCBmzGJutZ = ntwk_impl;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&ONvcEjLBnVNUdjMKOAwF));
CUDNN_CALL(cudnnCreateFilterDescriptor(&SDWKEQTZaTFZByPlzUDR));
CUDNN_CALL(cudnnCreateTensorDescriptor(&MEmIeGILUZNEWEagSzRk));
CUDNN_CALL(cudnnCreateTensorDescriptor(getOutputDescriptor()));
createConvLayer(FpguQZSermqZCMRiUfML, FrpxvsDMwwgbpqHXWxmN, CpMjJjtGOeWOzwxpAAQP,
ClEhcJFlvGCgiavziIag, CufLFODQDXTAPyRqYodN, DRzwhbNPpftRRIXXfHzd,
ATEikvMQPqBefhJzjzhc, AuqaQHxmPQSyYRemQvyX,
vpXxoeEhdEosLSsYXkNG, MIBnYCbKBdUrlfqlHdoo, outbufIdx); } float
MWConvLayerImpl::getIsGrouped() { return aLsOwwcceEmRSYzllBNs; } void
MWConvLayerImpl::setIsGrouped(float ig) { aLsOwwcceEmRSYzllBNs = ig; } void
MWConvLayerImpl::setOutput2(float* out2) { yPBlKhIGljihkXaXbYpB = out2; } float*
MWConvLayerImpl::getOutput2() { return yPBlKhIGljihkXaXbYpB; }
cudnnTensorDescriptor_t* MWConvLayerImpl::getGroupDescriptor() { return
&THfVbcZJtANcLKxEriuV; } void MWConvLayerImpl::createConvLayer(int
FpguQZSermqZCMRiUfML, int FrpxvsDMwwgbpqHXWxmN, int CpMjJjtGOeWOzwxpAAQP, int
ClEhcJFlvGCgiavziIag , int CufLFODQDXTAPyRqYodN, int DRzwhbNPpftRRIXXfHzd,
int ATEikvMQPqBefhJzjzhc, int AuqaQHxmPQSyYRemQvyX, const char*
vpXxoeEhdEosLSsYXkNG, const char* MIBnYCbKBdUrlfqlHdoo, int outbufIdx) {
MWTensor* ipTensor = getLayer()->getInputTensor(0); int
OVOphSOolqRQDDoKPwxy = CpMjJjtGOeWOzwxpAAQP; int
OiVqrkNdXioJhALWMMvm = CufLFODQDXTAPyRqYodN; if
((CpMjJjtGOeWOzwxpAAQP != ClEhcJFlvGCgiavziIag) || (CufLFODQDXTAPyRqYodN !=
DRzwhbNPpftRRIXXfHzd)) { float* newInput; int inputH = ipTensor->getHeight() +
CpMjJjtGOeWOzwxpAAQP + ClEhcJFlvGCgiavziIag; int inputW =
ipTensor->getWidth() + CufLFODQDXTAPyRqYodN + DRzwhbNPpftRRIXXfHzd;
CUDA_CALL(cudaMalloc((void**)&newInput, sizeof(float)*ipTensor->getBatchSize()
* ipTensor->getChannels() * inputH * inputW)); CUDA_CALL(cudaMemset(newInput,
0,
sizeof(float)*ipTensor->getBatchSize()*ipTensor->getChannels()*inputH*inputW));
URgvgDXnZskIYGdtimcU = new MWTensor(inputH, inputW, ipTensor->getChannels(),
ipTensor->getBatchSize(), newInput,getLayer(), 0);
CUDNN_CALL(cudnnCreateTensorDescriptor(&YgcpEBUCwCLaPhyntIio));
CUDNN_CALL(cudnnSetTensor4dDescriptor(YgcpEBUCwCLaPhyntIio, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, URgvgDXnZskIYGdtimcU->getBatchSize(), URgvgDXnZskIYGdtimcU->getChannels(),
URgvgDXnZskIYGdtimcU->getHeight(), URgvgDXnZskIYGdtimcU->getWidth()));
OVOphSOolqRQDDoKPwxy = 0; OiVqrkNdXioJhALWMMvm = 0; } else {
URgvgDXnZskIYGdtimcU = ipTensor; YgcpEBUCwCLaPhyntIio =
*getCuDNNDescriptor(URgvgDXnZskIYGdtimcU); } bUVPfnrJhLfHzOLUUrKk =
CpMjJjtGOeWOzwxpAAQP; cQBKlCKXxecGPJrXBXdk = CufLFODQDXTAPyRqYodN;
assert(URgvgDXnZskIYGdtimcU != NULL); MWConvLayer* convLayer = static_cast<MWConvLayer*>(getLayer());
#if (CUDNN_MAJOR <= 5)
{ if ((ATEikvMQPqBefhJzjzhc != 1) && (AuqaQHxmPQSyYRemQvyX != 1)){
printf("Dilated Convolution only supported for cuDNN 6 or greater "); throw
std::runtime_error("Unsupported Dilation Factor"); }
CUDNN_CALL(cudnnSetConvolution2dDescriptor(ONvcEjLBnVNUdjMKOAwF,
OVOphSOolqRQDDoKPwxy, OiVqrkNdXioJhALWMMvm, FpguQZSermqZCMRiUfML,
FrpxvsDMwwgbpqHXWxmN, 1, 1, CUDNN_CROSS_CORRELATION)); }
#else
{ CUDNN_CALL(cudnnSetConvolution2dDescriptor(ONvcEjLBnVNUdjMKOAwF,
OVOphSOolqRQDDoKPwxy, OiVqrkNdXioJhALWMMvm, FpguQZSermqZCMRiUfML,
FrpxvsDMwwgbpqHXWxmN, ATEikvMQPqBefhJzjzhc, AuqaQHxmPQSyYRemQvyX,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); }
#endif
#if (FP16_ENABLED == 1 && ( CUDNN_MAJOR > 7 || (CUDNN_MAJOR == 7 && CUDNN_MINOR >= 2) ))
CUDNN_CALL(cudnnSetConvolutionMathType(ONvcEjLBnVNUdjMKOAwF, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION));
#endif
int shEncNmxJsMuJKwbrwok, rxMAtVYGgGtZoKBkJcjc; int numInputFeatures =
BdqURaHPmdnfzvtUvocl*BuyZFXzwOMxcePIbCLfl; int
fxxCPKTclxXPxrdMAkwi,sRECVoNNtDdcBOWgDyar,sxuOMwKXOKfuExclRaSe; MWTensor*
opTensor = convLayer->getOutputTensor(0); fxxCPKTclxXPxrdMAkwi =
opTensor->getChannels(); sRECVoNNtDdcBOWgDyar = opTensor->getHeight();
sxuOMwKXOKfuExclRaSe = opTensor->getWidth(); size_t twppmWSuyDzoZjSbrMHi = 0; if(
BuyZFXzwOMxcePIbCLfl == 1 ) {
CUDNN_CALL(cudnnSetFilter4dDescriptor(SDWKEQTZaTFZByPlzUDR, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, fxxCPKTclxXPxrdMAkwi, numInputFeatures,
AwZQzUhuWVLGrWgLHRuM, AzTsxYcYjIEJsGQbeYHm));
CUDNN_CALL(cudnnSetTensor4dDescriptor(MEmIeGILUZNEWEagSzRk, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, fxxCPKTclxXPxrdMAkwi, 1, 1));
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(ONvcEjLBnVNUdjMKOAwF,
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, &shEncNmxJsMuJKwbrwok,
&rxMAtVYGgGtZoKBkJcjc, &sRECVoNNtDdcBOWgDyar, &sxuOMwKXOKfuExclRaSe));
CUDNN_CALL(cudnnSetTensor4dDescriptor(*getOutputDescriptor(),
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, shEncNmxJsMuJKwbrwok, rxMAtVYGgGtZoKBkJcjc,
opTensor->getHeight(), opTensor->getWidth())); assert(opTensor->getHeight() ==
sRECVoNNtDdcBOWgDyar); assert(opTensor->getWidth() == sxuOMwKXOKfuExclRaSe);
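// For cuDNN >= 7 the forward algorithm is chosen with the _v7 query, which returns ranked
// candidates and the top-ranked one is used; older versions fall back to the
// PREFER_FASTEST heuristic.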
#if (CUDNN_MAJOR < 7)
{
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getOutputDescriptor(), CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0,
&NtWaRGCHLeTapjWdEHHS)); }
#else
{ cudnnConvolutionFwdAlgoPerf_t perf_results[3]; int returnedAlgoCount;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm_v7(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getOutputDescriptor(), 3, &returnedAlgoCount, perf_results));
NtWaRGCHLeTapjWdEHHS = perf_results[0].algo; }
#endif
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
YgcpEBUCwCLaPhyntIio, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getOutputDescriptor(), NtWaRGCHLeTapjWdEHHS, &twppmWSuyDzoZjSbrMHi)); } else {
setIsGrouped(1); MWTensor* ipTensor = URgvgDXnZskIYGdtimcU; XCLDbxHBtWRStETWIkId =
ipTensor->getData() + ipTensor->getChannels()/BuyZFXzwOMxcePIbCLfl *
ipTensor->getHeight() * ipTensor->getWidth();
CUDNN_CALL(cudnnCreateTensorDescriptor(&ZCArwzdUdwQuFQUWjnUE));
CUDNN_CALL(cudnnSetTensor4dDescriptorEx(ZCArwzdUdwQuFQUWjnUE,
CUDNN_DATA_FLOAT, ipTensor->getBatchSize(),
ipTensor->getChannels()/BuyZFXzwOMxcePIbCLfl, ipTensor->getHeight(),
ipTensor->getWidth(),
ipTensor->getChannels()*ipTensor->getHeight()*ipTensor->getWidth(),
ipTensor->getHeight()*ipTensor->getWidth(), ipTensor->getWidth(), 1));
CUDNN_CALL(cudnnCreateTensorDescriptor(getGroupDescriptor()));
CUDNN_CALL(cudnnSetFilter4dDescriptor(SDWKEQTZaTFZByPlzUDR, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, BlRIQPyqJZORKENzSdYf, BdqURaHPmdnfzvtUvocl,
AwZQzUhuWVLGrWgLHRuM, AzTsxYcYjIEJsGQbeYHm));
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(ONvcEjLBnVNUdjMKOAwF,
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, &shEncNmxJsMuJKwbrwok,
&rxMAtVYGgGtZoKBkJcjc, &sRECVoNNtDdcBOWgDyar, &sxuOMwKXOKfuExclRaSe));
assert(opTensor->getHeight() == sRECVoNNtDdcBOWgDyar); assert(opTensor->getWidth()
== sxuOMwKXOKfuExclRaSe);
CUDNN_CALL(cudnnSetTensor4dDescriptorEx(*getGroupDescriptor(),
CUDNN_DATA_FLOAT, shEncNmxJsMuJKwbrwok, rxMAtVYGgGtZoKBkJcjc, sRECVoNNtDdcBOWgDyar,
sxuOMwKXOKfuExclRaSe,
rxMAtVYGgGtZoKBkJcjc*BuyZFXzwOMxcePIbCLfl*sRECVoNNtDdcBOWgDyar*sxuOMwKXOKfuExclRaSe,
sRECVoNNtDdcBOWgDyar*sxuOMwKXOKfuExclRaSe, sxuOMwKXOKfuExclRaSe, 1));
CUDNN_CALL(cudnnSetTensor4dDescriptor(*getOutputDescriptor(),
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, shEncNmxJsMuJKwbrwok,
rxMAtVYGgGtZoKBkJcjc*BuyZFXzwOMxcePIbCLfl, sRECVoNNtDdcBOWgDyar, sxuOMwKXOKfuExclRaSe));
CUDNN_CALL(cudnnSetTensor4dDescriptor(MEmIeGILUZNEWEagSzRk, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, rxMAtVYGgGtZoKBkJcjc*BuyZFXzwOMxcePIbCLfl, 1, 1));
#if (CUDNN_MAJOR < 7)
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getGroupDescriptor(), CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &NtWaRGCHLeTapjWdEHHS));
#else
cudnnConvolutionFwdAlgoPerf_t perf_results[3]; int returnedAlgoCount;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm_v7(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getGroupDescriptor(), 3, &returnedAlgoCount,perf_results));
NtWaRGCHLeTapjWdEHHS = perf_results[0].algo;
#endif
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
ZCArwzdUdwQuFQUWjnUE, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF,
*getGroupDescriptor(), NtWaRGCHLeTapjWdEHHS, &twppmWSuyDzoZjSbrMHi)); } if(
twppmWSuyDzoZjSbrMHi > *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize() ) {
enPbWLzEmxYCBmzGJutZ->setWorkSpaceSize(twppmWSuyDzoZjSbrMHi); }
assert(shEncNmxJsMuJKwbrwok == ipTensor->getBatchSize());
assert(fxxCPKTclxXPxrdMAkwi == rxMAtVYGgGtZoKBkJcjc *
BuyZFXzwOMxcePIbCLfl); if (outbufIdx < 0) {
CUDA_CALL(cudaMalloc((void**)&PtkeOkuClHzhOfpmBevf, sizeof(float) *
opTensor->getBatchSize() * opTensor->getChannels() * opTensor->getHeight() *
opTensor->getWidth())); } else {
setData(enPbWLzEmxYCBmzGJutZ->memBuffer[outbufIdx]);
getLayer()->getOutputTensor(0)->setopBufIndex(outbufIdx); }
CUDA_CALL(cudaMalloc((void**)&vIWQzNvYZSuxmOTVDFhU,
sizeof(float)*BdqURaHPmdnfzvtUvocl*fxxCPKTclxXPxrdMAkwi*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm));
CUDA_CALL(cudaMalloc((void**)&KHClOltUSuqFVVErSxVb, sizeof(float)*fxxCPKTclxXPxrdMAkwi));
#ifdef RANDOM
curandGenerateNormal(SUleyRyvAggTFnSdxLru, vIWQzNvYZSuxmOTVDFhU,
BdqURaHPmdnfzvtUvocl*fxxCPKTclxXPxrdMAkwi*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm,
0, 0.1); curandGenerateNormal(SUleyRyvAggTFnSdxLru, KHClOltUSuqFVVErSxVb,
fxxCPKTclxXPxrdMAkwi, -0.5, 1);
#endif
if( BuyZFXzwOMxcePIbCLfl == 2 ) { veFyKKHbdqBIvQLYBqfF = vIWQzNvYZSuxmOTVDFhU +
BlRIQPyqJZORKENzSdYf * BdqURaHPmdnfzvtUvocl * AwZQzUhuWVLGrWgLHRuM *
AzTsxYcYjIEJsGQbeYHm; setOutput2(getData() + fxxCPKTclxXPxrdMAkwi/ 2
* sRECVoNNtDdcBOWgDyar * sxuOMwKXOKfuExclRaSe); setIsGrouped(1); }
loadWeights(vpXxoeEhdEosLSsYXkNG); loadBias(MIBnYCbKBdUrlfqlHdoo); } void
MWConvLayerImpl::predict() { MWConvLayer* convLayer =
static_cast<MWConvLayer*>(getLayer()); if (URgvgDXnZskIYGdtimcU !=
convLayer->getInputTensor()) { CUDA_CALL(cudaMemset(URgvgDXnZskIYGdtimcU->getData(),
0,
sizeof(float)*URgvgDXnZskIYGdtimcU->getBatchSize()*URgvgDXnZskIYGdtimcU->getChannels()*URgvgDXnZskIYGdtimcU->getHeight()*URgvgDXnZskIYGdtimcU->getWidth()));
int fhikqqlnUKCjleVKDqiG =
convLayer->getInputTensor()->getHeight()*convLayer->getInputTensor()->getWidth()*convLayer->getInputTensor()->getBatchSize()*convLayer->getInputTensor()->getChannels();
MWCNNLayerImpl::padInput(convLayer->getInputTensor()->getData(),
convLayer->getInputTensor()->getHeight(),
convLayer->getInputTensor()->getWidth(),
convLayer->getInputTensor()->getChannels(), URgvgDXnZskIYGdtimcU->getHeight(),
URgvgDXnZskIYGdtimcU->getWidth(), bUVPfnrJhLfHzOLUUrKk, cQBKlCKXxecGPJrXBXdk,
URgvgDXnZskIYGdtimcU->getData(), fhikqqlnUKCjleVKDqiG); } if(BuyZFXzwOMxcePIbCLfl == 1
) { assert(getData() != URgvgDXnZskIYGdtimcU->getData());
CUDNN_CALL(cudnnConvolutionForward(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),getOnePtr(),
YgcpEBUCwCLaPhyntIio, URgvgDXnZskIYGdtimcU->getData(), SDWKEQTZaTFZByPlzUDR,
vIWQzNvYZSuxmOTVDFhU, ONvcEjLBnVNUdjMKOAwF, NtWaRGCHLeTapjWdEHHS,
enPbWLzEmxYCBmzGJutZ->getWorkSpace(), *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize(),
getZeroPtr(), *getOutputDescriptor(),getData()));
CUDNN_CALL(cudnnAddTensor(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(), getOnePtr(),
MEmIeGILUZNEWEagSzRk, KHClOltUSuqFVVErSxVb, getOnePtr(),
*getOutputDescriptor(),getData())); } else { assert(getData() !=
URgvgDXnZskIYGdtimcU->getData());
CUDNN_CALL(cudnnConvolutionForward(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
getOnePtr(), ZCArwzdUdwQuFQUWjnUE, URgvgDXnZskIYGdtimcU->getData(),
SDWKEQTZaTFZByPlzUDR, vIWQzNvYZSuxmOTVDFhU, ONvcEjLBnVNUdjMKOAwF, NtWaRGCHLeTapjWdEHHS,
enPbWLzEmxYCBmzGJutZ->getWorkSpace(), *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize(),
getZeroPtr(), *getGroupDescriptor(), getData()));
CUDNN_CALL(cudnnConvolutionForward(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
getOnePtr(), ZCArwzdUdwQuFQUWjnUE, XCLDbxHBtWRStETWIkId, SDWKEQTZaTFZByPlzUDR,
veFyKKHbdqBIvQLYBqfF, ONvcEjLBnVNUdjMKOAwF, NtWaRGCHLeTapjWdEHHS,
enPbWLzEmxYCBmzGJutZ->getWorkSpace(), *enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize(),
getZeroPtr(), *getGroupDescriptor(), getOutput2()));
CUDNN_CALL(cudnnAddTensor(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(), getOnePtr(),
MEmIeGILUZNEWEagSzRk, KHClOltUSuqFVVErSxVb, getOnePtr(), *getOutputDescriptor(),
getData())); } } void MWConvLayerImpl::cleanup() {
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(ONvcEjLBnVNUdjMKOAwF));
CUDNN_CALL(cudnnDestroyFilterDescriptor(SDWKEQTZaTFZByPlzUDR)); if
(vIWQzNvYZSuxmOTVDFhU) { CUDA_FREE_CALL(vIWQzNvYZSuxmOTVDFhU); }
CUDNN_CALL(cudnnDestroyTensorDescriptor(MEmIeGILUZNEWEagSzRk)); if
(KHClOltUSuqFVVErSxVb) { CUDA_FREE_CALL(KHClOltUSuqFVVErSxVb); }
CUDNN_CALL(cudnnDestroyTensorDescriptor(*getOutputDescriptor())); if
(URgvgDXnZskIYGdtimcU != getLayer()->getInputTensor(0)) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(YgcpEBUCwCLaPhyntIio));
CUDA_FREE_CALL(URgvgDXnZskIYGdtimcU->getData()); } if (getIsGrouped()) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(ZCArwzdUdwQuFQUWjnUE));
CUDNN_CALL(cudnnDestroyTensorDescriptor(*getGroupDescriptor())); } for(int idx
= 0; idx < getLayer()->getNumOutputs(); idx++) { float* data =
getLayer()->getOutputTensor(idx)->getData(); if (data) {
if(getLayer()->getOutputTensor(idx)->getopBufIndex() < 0) CUDA_FREE_CALL(data);
} } } void MWConvLayerImpl::loadWeights(const char* RVrPByQXdKmunRZHKWJD) {
MWConvLayer* convLayer = static_cast<MWConvLayer*>(getLayer()); FILE*
SUjIWYfjMcdzSZaCSVRT = MWCNNLayer::openBinaryFile(RVrPByQXdKmunRZHKWJD);
assert(SUjIWYfjMcdzSZaCSVRT); assert(BdqURaHPmdnfzvtUvocl ==
URgvgDXnZskIYGdtimcU->getChannels()/BuyZFXzwOMxcePIbCLfl); int eqOmMKQRpqBqRQCnJmxt =
BdqURaHPmdnfzvtUvocl*convLayer->getOutputTensor()->getChannels()*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm;
float* MdSWZSOAjugbWppryHbR = MALLOC_CALL(sizeof(float)*eqOmMKQRpqBqRQCnJmxt);
call_fread(MdSWZSOAjugbWppryHbR, sizeof(float), eqOmMKQRpqBqRQCnJmxt, SUjIWYfjMcdzSZaCSVRT,
RVrPByQXdKmunRZHKWJD); if( AwZQzUhuWVLGrWgLHRuM != 1 &&
AzTsxYcYjIEJsGQbeYHm != 1 ) { float* MgAiRWiTutoTMxKXjmHQ =
MALLOC_CALL(sizeof(float)*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm);
for(int k=0; k<eqOmMKQRpqBqRQCnJmxt/AwZQzUhuWVLGrWgLHRuM/AzTsxYcYjIEJsGQbeYHm;
k++) { for(int i=0; i<AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm; i++)
MgAiRWiTutoTMxKXjmHQ[i]=MdSWZSOAjugbWppryHbR[k*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm+i];
for(int j=0; j<AwZQzUhuWVLGrWgLHRuM; j++) for(int i=0;
i<AzTsxYcYjIEJsGQbeYHm; i++)
MdSWZSOAjugbWppryHbR[k*AwZQzUhuWVLGrWgLHRuM*AzTsxYcYjIEJsGQbeYHm+j*AzTsxYcYjIEJsGQbeYHm+i]=MgAiRWiTutoTMxKXjmHQ[j+i*AwZQzUhuWVLGrWgLHRuM];
} free(MgAiRWiTutoTMxKXjmHQ); } CUDA_CALL(cudaMemcpy(vIWQzNvYZSuxmOTVDFhU,
MdSWZSOAjugbWppryHbR, sizeof(float)*eqOmMKQRpqBqRQCnJmxt, cudaMemcpyHostToDevice));
#if 0
printf("%s loaded. Size = %d. %f\n", RVrPByQXdKmunRZHKWJD, eqOmMKQRpqBqRQCnJmxt, MdSWZSOAjugbWppryHbR[0]);
#endif
free(MdSWZSOAjugbWppryHbR); fclose(SUjIWYfjMcdzSZaCSVRT); } void
MWConvLayerImpl::loadBias(const char* RVrPByQXdKmunRZHKWJD) { MWConvLayer*
convLayer = static_cast<MWConvLayer*>(getLayer()); FILE* SUjIWYfjMcdzSZaCSVRT =
MWCNNLayer::openBinaryFile(RVrPByQXdKmunRZHKWJD); assert(SUjIWYfjMcdzSZaCSVRT); int
eqOmMKQRpqBqRQCnJmxt = convLayer->getOutputTensor()->getChannels(); float*
MdSWZSOAjugbWppryHbR = MALLOC_CALL(sizeof(float)*eqOmMKQRpqBqRQCnJmxt);
call_fread(MdSWZSOAjugbWppryHbR, sizeof(float), eqOmMKQRpqBqRQCnJmxt, SUjIWYfjMcdzSZaCSVRT,
RVrPByQXdKmunRZHKWJD); CUDA_CALL(cudaMemcpy(KHClOltUSuqFVVErSxVb, MdSWZSOAjugbWppryHbR,
sizeof(float)*eqOmMKQRpqBqRQCnJmxt, cudaMemcpyHostToDevice));
free(MdSWZSOAjugbWppryHbR); fclose(SUjIWYfjMcdzSZaCSVRT); } void
MWConvLayerImpl::postSetup() { if(enPbWLzEmxYCBmzGJutZ->getAutoTune()) {
getConvAlgoTuned(); } else if(!enPbWLzEmxYCBmzGJutZ->getWorkSpace()) {
getConvAlgoNoWorkSpace(); } cudnnTensorDescriptor_t tmpInDesc = getIsGrouped()
? ZCArwzdUdwQuFQUWjnUE : YgcpEBUCwCLaPhyntIio; cudnnTensorDescriptor_t
hnewnpwgzKmOdualajhn = getIsGrouped() ? *getGroupDescriptor() :
*getOutputDescriptor(); size_t twppmWSuyDzoZjSbrMHi;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
tmpInDesc, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF, hnewnpwgzKmOdualajhn,
NtWaRGCHLeTapjWdEHHS, &twppmWSuyDzoZjSbrMHi)); if( (long int)twppmWSuyDzoZjSbrMHi
> *enPbWLzEmxYCBmzGJutZ->getPostSetupWorkSpaceSize() ) {
enPbWLzEmxYCBmzGJutZ->setPostSetupWorkSpaceSize((long int)twppmWSuyDzoZjSbrMHi);
} } void MWConvLayerImpl::getConvAlgoTuned() { cudnnConvolutionFwdAlgoPerf_t
perf_results[3]; cudnnTensorDescriptor_t tempInDesc = getIsGrouped() ?
ZCArwzdUdwQuFQUWjnUE : YgcpEBUCwCLaPhyntIio; cudnnTensorDescriptor_t
hnewnpwgzKmOdualajhn = getIsGrouped() ? *getGroupDescriptor() :
*getOutputDescriptor(); int returnedAlgoCount;
CUDNN_CALL(cudnnFindConvolutionForwardAlgorithmEx(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
tempInDesc, URgvgDXnZskIYGdtimcU->getData(), SDWKEQTZaTFZByPlzUDR, vIWQzNvYZSuxmOTVDFhU,
ONvcEjLBnVNUdjMKOAwF, hnewnpwgzKmOdualajhn, getData(), 3, &returnedAlgoCount,
&perf_results[0], enPbWLzEmxYCBmzGJutZ->getWorkSpace(),
*enPbWLzEmxYCBmzGJutZ->getWorkSpaceSize())); NtWaRGCHLeTapjWdEHHS =
perf_results[0].algo; } void MWConvLayerImpl::getConvAlgoNoWorkSpace() {
assert(enPbWLzEmxYCBmzGJutZ->getWorkSpace() == 0); cudnnTensorDescriptor_t
tempInDesc = getIsGrouped() ? ZCArwzdUdwQuFQUWjnUE : YgcpEBUCwCLaPhyntIio;
cudnnTensorDescriptor_t hnewnpwgzKmOdualajhn = getIsGrouped() ?
*getGroupDescriptor() : *getOutputDescriptor();
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(*enPbWLzEmxYCBmzGJutZ->getCudnnHandle(),
tempInDesc, SDWKEQTZaTFZByPlzUDR, ONvcEjLBnVNUdjMKOAwF, hnewnpwgzKmOdualajhn,
CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, &NtWaRGCHLeTapjWdEHHS)); } |
1e3d8f29d087f11fa02fbdddd70a1b98c1d93ee1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "refocus_header.h"
#include "refocus_scattering.h"
#include "cuMath.cu"
//debug
// #include<stdio.h>
__constant__ constStructre<double> constStr_scat_single;
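// One thread per (illumination, view) direction pair. Each entry of the single-scattering
// far-field matrix is the phase-function value s(l,v), attenuated by exp(-sigt * d) for the
// distance d travelled inside the box, and multiplied by the phase factor exp(i*k*x0.(l - v)).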
template<typename T, typename T2, typename T3, typename scatteringType>
__global__ void singleScattering_kernel(T2* single_scattering_matrix, const T3* x0,
const scatteringType* scattering_struct, T k, const ff_direction<T3> *l_dir, const ff_direction<T3> *v_dir, ub32 directions_num)
{
const constStructre<T>* curr_constStr = (const constStructre<T>*) &constStr_scat_single;
ub32 entry_num = blockIdx.x * blockDim.x + threadIdx.x;
if(entry_num < (directions_num * directions_num))
{
ub32 l_idx = entry_num % directions_num;
ub32 v_idx = entry_num / directions_num;
const ff_direction<T3> *il_dir = l_dir + l_idx;
const ff_direction<T3> *iv_dir = v_dir + v_idx;
if(il_dir->is_active && iv_dir->is_active)
{
T2 e;
T bd1, bd2, bd3, dz;
bd1 = abs(il_dir->dir_attenuation.x) < 1e-8 ? INFINITY : il_dir->dir_attenuation.x >= 0 ? (x0->x - curr_constStr->box_min[0]) / ( il_dir->dir_attenuation.x) : (x0->x - curr_constStr->box_max[0]) / (il_dir->dir_attenuation.x);
bd2 = abs(il_dir->dir_attenuation.y) < 1e-8 ? INFINITY : il_dir->dir_attenuation.y >= 0 ? (x0->y - curr_constStr->box_min[1]) / ( il_dir->dir_attenuation.y) : (x0->y - curr_constStr->box_max[1]) / (il_dir->dir_attenuation.y);
bd3 = abs(il_dir->dir_attenuation.z) < 1e-8 ? INFINITY : il_dir->dir_attenuation.z >= 0 ? (x0->z - curr_constStr->box_min[2]) / ( il_dir->dir_attenuation.z) : (x0->z - curr_constStr->box_max[2]) / (il_dir->dir_attenuation.z);
dz = fmin(fmin(bd1, bd2), bd3);
bd1 = abs(iv_dir->dir_attenuation.x) < 1e-8 ? INFINITY : iv_dir->dir_attenuation.x < 0 ? (curr_constStr->box_min[0] - x0->x) / ( iv_dir->dir_attenuation.x) : (curr_constStr->box_max[0] - x0->x) / (iv_dir->dir_attenuation.x);
bd2 = abs(iv_dir->dir_attenuation.y) < 1e-8 ? INFINITY : iv_dir->dir_attenuation.y < 0 ? (curr_constStr->box_min[1] - x0->y) / ( iv_dir->dir_attenuation.y) : (curr_constStr->box_max[1] - x0->y) / (iv_dir->dir_attenuation.y);
bd3 = abs(iv_dir->dir_attenuation.z) < 1e-8 ? INFINITY : iv_dir->dir_attenuation.z < 0 ? (curr_constStr->box_min[2] - x0->z) / ( iv_dir->dir_attenuation.z) : (curr_constStr->box_max[2] - x0->z) / (iv_dir->dir_attenuation.z);
dz += fmin(fmin(bd1, bd2), bd3);
e.x = -curr_constStr->sigt * dz;
e.y = k *
(x0->x * (il_dir->dir.x - iv_dir->dir.x) +
x0->y * (il_dir->dir.y - iv_dir->dir.y) +
x0->z * (il_dir->dir.z - iv_dir->dir.z) );
e = complexExponent(e);
T s_lv = scattering_contribution<T,T3,3,true>(scattering_struct, &il_dir->dir, &iv_dir->dir);
*(single_scattering_matrix + entry_num) = s_lv * e;
}
else
{
*(single_scattering_matrix + entry_num) = buildComplex((T) 0.0, (T) 0.0);
}
}
}
template<typename T, typename T2, typename T3>
void singleScattering(T2 *us, const T3 *x0, ff_refocus_struct<T2,T3>* ff_refocus, const T2* constPath, ub32 is_correlation, ub32 total_elements,
T k, ub32 scattering_paramenter, const void* scatteringStruct)
{
ub32 total_directions = ff_refocus->num_directions;
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// {
// double2 A[12] =
// {
// {0.1,0.1}, {0.2,0.2}, {0.3,0.3}, {0.4,0.4},
// {0.5,0.5}, {0.6,0.6}, {0.7,0.7}, {0.8,0.8},
// {0.9,0.9}, {1.0,1.0}, {1.1,1.1}, {1.2,1.2}
// };
//
// double2 B[20] =
// {
// {10.0,5.00}, {10.1,5.05}, {10.2,5.10}, {10.3,5.15}, {10.4,5.20},
// {10.5,5.25}, {10.6,5.30}, {10.7,5.35}, {10.8,5.40}, {10.9,5.45},
// {11.0,5.50}, {11.1,5.55}, {11.2,5.60}, {11.3,5.65}, {11.4,5.70},
// {11.5,5.75}, {11.6,5.80}, {11.7,5.85}, {11.8,5.90}, {11.9,5.95}
// };
//
// double2 C[15];
//
// double2 *gpu_a, *gpu_b, *gpu_c;
//
// hipMalloc(&gpu_a, sizeof(double2) * 12);
// hipMalloc(&gpu_b, sizeof(double2) * 20);
// hipMalloc(&gpu_c, sizeof(double2) * 15);
//
// hipMemcpy(gpu_a , A , sizeof(double2) * 12, hipMemcpyHostToDevice);
// hipMemcpy(gpu_b , B , sizeof(double2) * 20, hipMemcpyHostToDevice);
//
// matrixMult(ff_refocus->cublas_handle, 3, 4, 5, gpu_a, gpu_b, gpu_c);
// hipMemcpy(C , gpu_c , sizeof(double2) * 15, hipMemcpyDeviceToHost);
//
// fprintf(fptr,"C_double = [");
// for(ub32 iter1 = 0; iter1 < 3 ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < 5 ; iter2++)
// {
// fprintf(fptr,"%f + %fi ",C[iter2 + 5 * iter1].x,C[iter2 + 5 * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
// hipFree(gpu_a);
// hipFree(gpu_b);
// hipFree(gpu_c);
// }
//
// {
// float2 A[12] =
// {
// {0.1f,0.1f}, {0.2f,0.2f}, {0.3f,0.3f}, {0.4f,0.4f},
// {0.5f,0.5f}, {0.6f,0.6f}, {0.7f,0.7f}, {0.8f,0.8f},
// {0.9f,0.9f}, {1.0f,1.0f}, {1.1f,1.1f}, {1.2f,1.2f}
// };
//
// float2 B[20] =
// {
// {10.0f,5.00f}, {10.1f,5.05f}, {10.2f,5.10f}, {10.3f,5.15f}, {10.4f,5.20f},
// {10.5f,5.25f}, {10.6f,5.30f}, {10.7f,5.35f}, {10.8f,5.40f}, {10.9f,5.45f},
// {11.0f,5.50f}, {11.1f,5.55f}, {11.2f,5.60f}, {11.3f,5.65f}, {11.4f,5.70f},
// {11.5f,5.75f}, {11.6f,5.80f}, {11.7f,5.85f}, {11.8f,5.90f}, {11.9f,5.95f}
// };
//
// float2 C[15];
//
// float2 *gpu_a, *gpu_b, *gpu_c;
//
// hipMalloc(&gpu_a, sizeof(float2) * 12);
// hipMalloc(&gpu_b, sizeof(float2) * 20);
// hipMalloc(&gpu_c, sizeof(float2) * 15);
//
// hipMemcpy(gpu_a , A , sizeof(float2) * 12, hipMemcpyHostToDevice);
// hipMemcpy(gpu_b , B , sizeof(float2) * 20, hipMemcpyHostToDevice);
//
// matrixMult(ff_refocus->cublas_handle, 3, 4, 5, gpu_a, gpu_b, gpu_c);
// hipMemcpy(C , gpu_c , sizeof(float2) * 15, hipMemcpyDeviceToHost);
//
// fprintf(fptr,"C_single = [");
// for(ub32 iter1 = 0; iter1 < 3 ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < 5 ; iter2++)
// {
// fprintf(fptr,"%f + %fi ",C[iter2 + 5 * iter1].x,C[iter2 + 5 * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
// hipFree(gpu_a);
// hipFree(gpu_b);
// hipFree(gpu_c);
// }
//
// fclose(fptr);
// END DEBUG
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// {
// double2 A[6] =
// {
// {0.1,0.1}, {0.2,0.2}, {0.3,0.3}, {0.4,0.4},
// {0.5,0.5}, {0.6,0.6}
// };
//
// double2 B[6] =
// {
// {10.0,5.00}, {10.1,5.05}, {10.2,5.10}, {10.3,5.15}, {10.4,5.20},
// {10.5,5.25}
// };
//
// double2 *gpu_a, *gpu_b;
//
// hipMalloc(&gpu_a, sizeof(double2) * 6);
// hipMalloc(&gpu_b, sizeof(double2) * 6);
//
// hipMemcpy(gpu_a , A , sizeof(double2) * 6, hipMemcpyHostToDevice);
// hipMemcpy(gpu_b , B , sizeof(double2) * 6, hipMemcpyHostToDevice);
//
// matrixAdd(ff_refocus->cublas_handle, 6, gpu_a, gpu_b);
// hipMemcpy(A , gpu_a , sizeof(double2) * 6, hipMemcpyDeviceToHost);
//
// fprintf(fptr,"A_double = [");
// for(ub32 iter1 = 0; iter1 < 6 ; iter1++)
// {
// fprintf(fptr,"%f + %fi ",A[iter1].x,A[iter1].y);
// }
// fprintf(fptr,"];\n\n");
// hipFree(gpu_a);
// hipFree(gpu_b);
// }
//
// {
// float2 A[5] =
// {
// {0.1f,0.1f}, {0.2f,0.2f}, {0.3f,0.3f}, {0.4f,0.4f},
// {0.5f,0.5f}
// };
//
// float2 B[5] =
// {
// {10.0f,5.00f}, {10.1f,5.05f}, {10.2f,5.10f}, {10.3f,5.15f}, {10.4f,5.20f}
// };
//
// float2 *gpu_a, *gpu_b;
//
// hipMalloc(&gpu_a, sizeof(float2) * 5);
// hipMalloc(&gpu_b, sizeof(float2) * 5);
//
// hipMemcpy(gpu_a , A , sizeof(float2) * 5, hipMemcpyHostToDevice);
// hipMemcpy(gpu_b , B , sizeof(float2) * 5, hipMemcpyHostToDevice);
//
// matrixAdd(ff_refocus->cublas_handle, 5, gpu_a, gpu_b);
// hipMemcpy(A , gpu_a , sizeof(float2) * 5, hipMemcpyDeviceToHost);
//
// fprintf(fptr,"A_single = [");
// for(ub32 iter1 = 0; iter1 < 5 ; iter1++)
// {
// fprintf(fptr,"%f + %fi ",A[iter1].x,A[iter1].y);
// }
// fprintf(fptr,"];\n\n");
// hipFree(gpu_a);
// hipFree(gpu_b);
// }
//
// fclose(fptr);
// END DEBUG
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// T2* wv_cpu, *wl_cpu;
//
// wv_cpu = (T2*) malloc(sizeof(T2) * total_elements * total_directions);
// wl_cpu = (T2*) malloc(sizeof(T2) * total_elements * total_directions);
//
// hipMemcpy(wv_cpu , ff_refocus->w_v , sizeof(T2) * total_elements * total_directions, hipMemcpyDeviceToHost);
// hipMemcpy(wl_cpu , ff_refocus->w_l , sizeof(T2) * total_elements * total_directions, hipMemcpyDeviceToHost);
//
// fprintf(fptr,"wv = [");
// for(ub32 iter1 = 0; iter1 < total_elements ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < total_directions ; iter2++)
// {
// fprintf(fptr,"%e + %ei ",wv_cpu[iter2 + total_directions * iter1].x,wv_cpu[iter2 + total_directions * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
//
// fprintf(fptr,"wl = [");
// for(ub32 iter1 = 0; iter1 < total_elements ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < total_directions ; iter2++)
// {
// fprintf(fptr,"%e + %ei ",wl_cpu[iter2 + total_directions * iter1].x,wl_cpu[iter2 + total_directions * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
//
//
// free(wv_cpu);
// free(wl_cpu);
//
// fclose(fptr);
// END DEBUG
ub32 blocks_num = (total_directions * total_directions - 1) / THREADS_NUM + 1;
ub32 elementwise_blocks_num = (total_elements * total_directions - 1) / THREADS_NUM + 1;
// build the ff matrix
if(scattering_paramenter == 1)
{
hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,randomScatter>), dim3(blocks_num), dim3(THREADS_NUM), 0, 0,
ff_refocus->single_scattering_ff,x0,(const randomScatter*) scatteringStruct,k,
ff_refocus->d_l,ff_refocus->d_v,total_directions);
}
if(scattering_paramenter == 2)
{
hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,tabulatedScatter<T>>), dim3(blocks_num), dim3(THREADS_NUM), 0, 0,
ff_refocus->single_scattering_ff,x0,(tabulatedScatter<T>*) scatteringStruct,k,
ff_refocus->d_l,ff_refocus->d_v,total_directions);
}
if(scattering_paramenter == 3)
{
hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,HGscatter<T>>), dim3(blocks_num), dim3(THREADS_NUM), 0, 0,
ff_refocus->single_scattering_ff,x0,(HGscatter<T>*) scatteringStruct,k,
ff_refocus->d_l,ff_refocus->d_v,total_directions);
}
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// T2* ff_mat_cpu;
//
// ff_mat_cpu = (T2*) malloc(sizeof(T2) * total_directions * total_directions);
//
// hipMemcpy(ff_mat_cpu , ff_refocus->single_scattering_ff , sizeof(T2) * total_directions * total_directions, hipMemcpyDeviceToHost);
//
// fprintf(fptr,"ff_mat = [");
// for(ub32 iter1 = 0; iter1 < total_directions ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < total_directions ; iter2++)
// {
// fprintf(fptr,"%e + %ei ",ff_mat_cpu[iter2 + total_directions * iter1].x,ff_mat_cpu[iter2 + total_directions * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
//
// free(ff_mat_cpu);
//
// fclose(fptr);
// END DEBUG
// wv * scattering matrix
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, total_directions,
ff_refocus->w_v, ff_refocus->single_scattering_ff, ff_refocus->mult_w_v_single_scattering);
// (wv * scattering matrix) .* wl
hipLaunchKernelGGL(( elementwise_mult<T2>), dim3(elementwise_blocks_num),dim3(THREADS_NUM), 0, 0, ff_refocus->mult_w_v_single_scattering, ff_refocus->w_l, total_elements * total_directions);
// sum((wv * scattering matrix) .* wl,2)
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, 1,
ff_refocus->mult_w_v_single_scattering, ff_refocus->ones_vector, ff_refocus->mult_res);
if(is_correlation)
{
// build the ff matrix
if(scattering_paramenter == 1)
{
hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,randomScatter>), dim3(blocks_num), dim3(THREADS_NUM), 0, 0,
ff_refocus->single_scattering_ff,x0,(const randomScatter*) scatteringStruct,k,
ff_refocus->d_l_2,ff_refocus->d_v_2,total_directions);
}
if(scattering_paramenter == 2)
{
hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,tabulatedScatter<T>>), dim3(blocks_num), dim3(THREADS_NUM), 0, 0,
ff_refocus->single_scattering_ff,x0,(tabulatedScatter<T>*) scatteringStruct,k,
ff_refocus->d_l_2,ff_refocus->d_v_2,total_directions);
}
if(scattering_paramenter == 3)
{
hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,HGscatter<T>>), dim3(blocks_num), dim3(THREADS_NUM), 0, 0,
ff_refocus->single_scattering_ff,x0,(HGscatter<T>*) scatteringStruct,k,
ff_refocus->d_l_2,ff_refocus->d_v_2,total_directions);
}
// wv_2 * scattering matrix
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, total_directions,
ff_refocus->w_v_2, ff_refocus->single_scattering_ff, ff_refocus->mult_w_v_single_scattering);
// (wv_2 * scattering matrix) .* wl_2
hipLaunchKernelGGL(( elementwise_mult<T2>), dim3(elementwise_blocks_num),dim3(THREADS_NUM), 0, 0, ff_refocus->mult_w_v_single_scattering, ff_refocus->w_l_2, total_elements * total_directions);
// sum((wv_2 * scattering matrix) .* wl_2,2)
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, 1,
ff_refocus->mult_w_v_single_scattering, ff_refocus->ones_vector, ff_refocus->mult_res_2);
ub32 mult_blocks_num = (total_elements - 1) / THREADS_NUM + 1;
hipLaunchKernelGGL(( elementwise_conj_mult<T2>), dim3(mult_blocks_num),dim3(THREADS_NUM), 0, 0, ff_refocus->mult_res, ff_refocus->mult_res_2, total_elements);
}
matrixAdd(ff_refocus->cublas_handle_device_alpha, total_elements,
constPath, us, ff_refocus->mult_res);
}
template<typename T>
void singleScattering_setConstMem(constStructre<T> *constMem)
{
hipMemcpyToSymbol(constStr_scat_single, constMem, sizeof(constStructre<T>));
}
template void singleScattering_setConstMem<double>(constStructre<double> *constMem);
template void singleScattering_setConstMem<float>(constStructre<float> *constMem);
template void singleScattering<double,double2,double3>(double2 *us, const double3 *x0,
ff_refocus_struct<double2,double3>* ff_refocus, const double2* constPath,
ub32 is_correlation, ub32 total_elements,
double k, ub32 scattering_paramenter, const void* scatteringStruct);
template void singleScattering<float,float2,float3>(float2 *us, const float3 *x0,
ff_refocus_struct<float2,float3>* ff_refocus, const float2* constPath,
ub32 is_correlation, ub32 total_elements,
float k, ub32 scattering_paramenter, const void* scatteringStruct);
| 1e3d8f29d087f11fa02fbdddd70a1b98c1d93ee1.cu | #include "refocus_header.h"
#include "refocus_scattering.h"
#include "cuMath.cu"
//debug
// #include<stdio.h>
__constant__ constStructre<double> constStr_scat_single;
template<typename T, typename T2, typename T3, typename scatteringType>
__global__ void singleScattering_kernel(T2* single_scattering_matrix, const T3* x0,
const scatteringType* scattering_struct, T k, const ff_direction<T3> *l_dir, const ff_direction<T3> *v_dir, ub32 directions_num)
{
const constStructre<T>* curr_constStr = (const constStructre<T>*) &constStr_scat_single;
ub32 entry_num = blockIdx.x * blockDim.x + threadIdx.x;
if(entry_num < (directions_num * directions_num))
{
ub32 l_idx = entry_num % directions_num;
ub32 v_idx = entry_num / directions_num;
const ff_direction<T3> *il_dir = l_dir + l_idx;
const ff_direction<T3> *iv_dir = v_dir + v_idx;
if(il_dir->is_active && iv_dir->is_active)
{
T2 e;
T bd1, bd2, bd3, dz;
bd1 = abs(il_dir->dir_attenuation.x) < 1e-8 ? INFINITY : il_dir->dir_attenuation.x >= 0 ? (x0->x - curr_constStr->box_min[0]) / ( il_dir->dir_attenuation.x) : (x0->x - curr_constStr->box_max[0]) / (il_dir->dir_attenuation.x);
bd2 = abs(il_dir->dir_attenuation.y) < 1e-8 ? INFINITY : il_dir->dir_attenuation.y >= 0 ? (x0->y - curr_constStr->box_min[1]) / ( il_dir->dir_attenuation.y) : (x0->y - curr_constStr->box_max[1]) / (il_dir->dir_attenuation.y);
bd3 = abs(il_dir->dir_attenuation.z) < 1e-8 ? INFINITY : il_dir->dir_attenuation.z >= 0 ? (x0->z - curr_constStr->box_min[2]) / ( il_dir->dir_attenuation.z) : (x0->z - curr_constStr->box_max[2]) / (il_dir->dir_attenuation.z);
dz = fmin(fmin(bd1, bd2), bd3);
bd1 = abs(iv_dir->dir_attenuation.x) < 1e-8 ? INFINITY : iv_dir->dir_attenuation.x < 0 ? (curr_constStr->box_min[0] - x0->x) / ( iv_dir->dir_attenuation.x) : (curr_constStr->box_max[0] - x0->x) / (iv_dir->dir_attenuation.x);
bd2 = abs(iv_dir->dir_attenuation.y) < 1e-8 ? INFINITY : iv_dir->dir_attenuation.y < 0 ? (curr_constStr->box_min[1] - x0->y) / ( iv_dir->dir_attenuation.y) : (curr_constStr->box_max[1] - x0->y) / (iv_dir->dir_attenuation.y);
bd3 = abs(iv_dir->dir_attenuation.z) < 1e-8 ? INFINITY : iv_dir->dir_attenuation.z < 0 ? (curr_constStr->box_min[2] - x0->z) / ( iv_dir->dir_attenuation.z) : (curr_constStr->box_max[2] - x0->z) / (iv_dir->dir_attenuation.z);
dz += fmin(fmin(bd1, bd2), bd3);
e.x = -curr_constStr->sigt * dz;
e.y = k *
(x0->x * (il_dir->dir.x - iv_dir->dir.x) +
x0->y * (il_dir->dir.y - iv_dir->dir.y) +
x0->z * (il_dir->dir.z - iv_dir->dir.z) );
e = complexExponent(e);
T s_lv = scattering_contribution<T,T3,3,true>(scattering_struct, &il_dir->dir, &iv_dir->dir);
*(single_scattering_matrix + entry_num) = s_lv * e;
}
else
{
*(single_scattering_matrix + entry_num) = buildComplex((T) 0.0, (T) 0.0);
}
}
}
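// Illustrative summary of the matrix entry computed above (a reading of the
// code, not additional functionality): for an incoming far-field direction l
// and an outgoing direction v, the stored value is
//   ff(l,v) = f(l,v) * exp(-sigt * (d_l + d_v)) * exp(i * k * x0.(l - v))
// where d_l and d_v are the distances from x0 to the box boundary along the
// two attenuation directions, sigt and the box limits come from
// constStr_scat_single, and f(l,v) is the amplitude returned by
// scattering_contribution().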
template<typename T, typename T2, typename T3>
void singleScattering(T2 *us, const T3 *x0, ff_refocus_struct<T2,T3>* ff_refocus, const T2* constPath, ub32 is_correlation, ub32 total_elements,
T k, ub32 scattering_paramenter, const void* scatteringStruct)
{
ub32 total_directions = ff_refocus->num_directions;
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// {
// double2 A[12] =
// {
// {0.1,0.1}, {0.2,0.2}, {0.3,0.3}, {0.4,0.4},
// {0.5,0.5}, {0.6,0.6}, {0.7,0.7}, {0.8,0.8},
// {0.9,0.9}, {1.0,1.0}, {1.1,1.1}, {1.2,1.2}
// };
//
// double2 B[20] =
// {
// {10.0,5.00}, {10.1,5.05}, {10.2,5.10}, {10.3,5.15}, {10.4,5.20},
// {10.5,5.25}, {10.6,5.30}, {10.7,5.35}, {10.8,5.40}, {10.9,5.45},
// {11.0,5.50}, {11.1,5.55}, {11.2,5.60}, {11.3,5.65}, {11.4,5.70},
// {11.5,5.75}, {11.6,5.80}, {11.7,5.85}, {11.8,5.90}, {11.9,5.95}
// };
//
// double2 C[15];
//
// double2 *gpu_a, *gpu_b, *gpu_c;
//
// cudaMalloc(&gpu_a, sizeof(double2) * 12);
// cudaMalloc(&gpu_b, sizeof(double2) * 20);
// cudaMalloc(&gpu_c, sizeof(double2) * 15);
//
// cudaMemcpy(gpu_a , A , sizeof(double2) * 12, cudaMemcpyHostToDevice);
// cudaMemcpy(gpu_b , B , sizeof(double2) * 20, cudaMemcpyHostToDevice);
//
// matrixMult(ff_refocus->cublas_handle, 3, 4, 5, gpu_a, gpu_b, gpu_c);
// cudaMemcpy(C , gpu_c , sizeof(double2) * 15, cudaMemcpyDeviceToHost);
//
// fprintf(fptr,"C_double = [");
// for(ub32 iter1 = 0; iter1 < 3 ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < 5 ; iter2++)
// {
// fprintf(fptr,"%f + %fi ",C[iter2 + 5 * iter1].x,C[iter2 + 5 * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
// cudaFree(gpu_a);
// cudaFree(gpu_b);
// cudaFree(gpu_c);
// }
//
// {
// float2 A[12] =
// {
// {0.1f,0.1f}, {0.2f,0.2f}, {0.3f,0.3f}, {0.4f,0.4f},
// {0.5f,0.5f}, {0.6f,0.6f}, {0.7f,0.7f}, {0.8f,0.8f},
// {0.9f,0.9f}, {1.0f,1.0f}, {1.1f,1.1f}, {1.2f,1.2f}
// };
//
// float2 B[20] =
// {
// {10.0f,5.00f}, {10.1f,5.05f}, {10.2f,5.10f}, {10.3f,5.15f}, {10.4f,5.20f},
// {10.5f,5.25f}, {10.6f,5.30f}, {10.7f,5.35f}, {10.8f,5.40f}, {10.9f,5.45f},
// {11.0f,5.50f}, {11.1f,5.55f}, {11.2f,5.60f}, {11.3f,5.65f}, {11.4f,5.70f},
// {11.5f,5.75f}, {11.6f,5.80f}, {11.7f,5.85f}, {11.8f,5.90f}, {11.9f,5.95f}
// };
//
// float2 C[15];
//
// float2 *gpu_a, *gpu_b, *gpu_c;
//
// cudaMalloc(&gpu_a, sizeof(float2) * 12);
// cudaMalloc(&gpu_b, sizeof(float2) * 20);
// cudaMalloc(&gpu_c, sizeof(float2) * 15);
//
// cudaMemcpy(gpu_a , A , sizeof(float2) * 12, cudaMemcpyHostToDevice);
// cudaMemcpy(gpu_b , B , sizeof(float2) * 20, cudaMemcpyHostToDevice);
//
// matrixMult(ff_refocus->cublas_handle, 3, 4, 5, gpu_a, gpu_b, gpu_c);
// cudaMemcpy(C , gpu_c , sizeof(float2) * 15, cudaMemcpyDeviceToHost);
//
// fprintf(fptr,"C_single = [");
// for(ub32 iter1 = 0; iter1 < 3 ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < 5 ; iter2++)
// {
// fprintf(fptr,"%f + %fi ",C[iter2 + 5 * iter1].x,C[iter2 + 5 * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
// cudaFree(gpu_a);
// cudaFree(gpu_b);
// cudaFree(gpu_c);
// }
//
// fclose(fptr);
// END DEBUG
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// {
// double2 A[6] =
// {
// {0.1,0.1}, {0.2,0.2}, {0.3,0.3}, {0.4,0.4},
// {0.5,0.5}, {0.6,0.6}
// };
//
// double2 B[6] =
// {
// {10.0,5.00}, {10.1,5.05}, {10.2,5.10}, {10.3,5.15}, {10.4,5.20},
// {10.5,5.25}
// };
//
// double2 *gpu_a, *gpu_b;
//
// cudaMalloc(&gpu_a, sizeof(double2) * 6);
// cudaMalloc(&gpu_b, sizeof(double2) * 6);
//
// cudaMemcpy(gpu_a , A , sizeof(double2) * 6, cudaMemcpyHostToDevice);
// cudaMemcpy(gpu_b , B , sizeof(double2) * 6, cudaMemcpyHostToDevice);
//
// matrixAdd(ff_refocus->cublas_handle, 6, gpu_a, gpu_b);
// cudaMemcpy(A , gpu_a , sizeof(double2) * 6, cudaMemcpyDeviceToHost);
//
// fprintf(fptr,"A_double = [");
// for(ub32 iter1 = 0; iter1 < 6 ; iter1++)
// {
// fprintf(fptr,"%f + %fi ",A[iter1].x,A[iter1].y);
// }
// fprintf(fptr,"];\n\n");
// cudaFree(gpu_a);
// cudaFree(gpu_b);
// }
//
// {
// float2 A[5] =
// {
// {0.1f,0.1f}, {0.2f,0.2f}, {0.3f,0.3f}, {0.4f,0.4f},
// {0.5f,0.5f}
// };
//
// float2 B[5] =
// {
// {10.0f,5.00f}, {10.1f,5.05f}, {10.2f,5.10f}, {10.3f,5.15f}, {10.4f,5.20f}
// };
//
// float2 *gpu_a, *gpu_b;
//
// cudaMalloc(&gpu_a, sizeof(float2) * 5);
// cudaMalloc(&gpu_b, sizeof(float2) * 5);
//
// cudaMemcpy(gpu_a , A , sizeof(float2) * 5, cudaMemcpyHostToDevice);
// cudaMemcpy(gpu_b , B , sizeof(float2) * 5, cudaMemcpyHostToDevice);
//
// matrixAdd(ff_refocus->cublas_handle, 5, gpu_a, gpu_b);
// cudaMemcpy(A , gpu_a , sizeof(float2) * 5, cudaMemcpyDeviceToHost);
//
// fprintf(fptr,"A_single = [");
// for(ub32 iter1 = 0; iter1 < 5 ; iter1++)
// {
// fprintf(fptr,"%f + %fi ",A[iter1].x,A[iter1].y);
// }
// fprintf(fptr,"];\n\n");
// cudaFree(gpu_a);
// cudaFree(gpu_b);
// }
//
// fclose(fptr);
// END DEBUG
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// T2* wv_cpu, *wl_cpu;
//
// wv_cpu = (T2*) malloc(sizeof(T2) * total_elements * total_directions);
// wl_cpu = (T2*) malloc(sizeof(T2) * total_elements * total_directions);
//
// cudaMemcpy(wv_cpu , ff_refocus->w_v , sizeof(T2) * total_elements * total_directions, cudaMemcpyDeviceToHost);
// cudaMemcpy(wl_cpu , ff_refocus->w_l , sizeof(T2) * total_elements * total_directions, cudaMemcpyDeviceToHost);
//
// fprintf(fptr,"wv = [");
// for(ub32 iter1 = 0; iter1 < total_elements ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < total_directions ; iter2++)
// {
// fprintf(fptr,"%e + %ei ",wv_cpu[iter2 + total_directions * iter1].x,wv_cpu[iter2 + total_directions * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
//
// fprintf(fptr,"wl = [");
// for(ub32 iter1 = 0; iter1 < total_elements ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < total_directions ; iter2++)
// {
// fprintf(fptr,"%e + %ei ",wl_cpu[iter2 + total_directions * iter1].x,wl_cpu[iter2 + total_directions * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
//
//
// free(wv_cpu);
// free(wl_cpu);
//
// fclose(fptr);
// END DEBUG
ub32 blocks_num = (total_directions * total_directions - 1) / THREADS_NUM + 1;
ub32 elementwise_blocks_num = (total_elements * total_directions - 1) / THREADS_NUM + 1;
// build the ff matrix
if(scattering_paramenter == 1)
{
singleScattering_kernel<T,T2,T3,randomScatter><<<blocks_num, THREADS_NUM>>>
(ff_refocus->single_scattering_ff,x0,(const randomScatter*) scatteringStruct,k,
ff_refocus->d_l,ff_refocus->d_v,total_directions);
}
if(scattering_paramenter == 2)
{
singleScattering_kernel<T,T2,T3,tabulatedScatter<T>><<<blocks_num, THREADS_NUM>>>
(ff_refocus->single_scattering_ff,x0,(tabulatedScatter<T>*) scatteringStruct,k,
ff_refocus->d_l,ff_refocus->d_v,total_directions);
}
if(scattering_paramenter == 3)
{
singleScattering_kernel<T,T2,T3,HGscatter<T>><<<blocks_num, THREADS_NUM>>>
(ff_refocus->single_scattering_ff,x0,(HGscatter<T>*) scatteringStruct,k,
ff_refocus->d_l,ff_refocus->d_v,total_directions);
}
// DEBUG
// FILE *fptr;
//
// fptr = fopen("C:\\Users\\chen.bar\\Documents\\GitHub\\gaussianBeam-field\\mex\\debug_2.txt","w");
// T2* ff_mat_cpu;
//
// ff_mat_cpu = (T2*) malloc(sizeof(T2) * total_directions * total_directions);
//
// cudaMemcpy(ff_mat_cpu , ff_refocus->single_scattering_ff , sizeof(T2) * total_directions * total_directions, cudaMemcpyDeviceToHost);
//
// fprintf(fptr,"ff_mat = [");
// for(ub32 iter1 = 0; iter1 < total_directions ; iter1++)
// {
// for(ub32 iter2 = 0; iter2 < total_directions ; iter2++)
// {
// fprintf(fptr,"%e + %ei ",ff_mat_cpu[iter2 + total_directions * iter1].x,ff_mat_cpu[iter2 + total_directions * iter1].y);
// }
// fprintf(fptr,"; \n");
// }
// fprintf(fptr,"];\n\n");
//
// free(ff_mat_cpu);
//
// fclose(fptr);
// END DEBUG
// wv * scattering matrix
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, total_directions,
ff_refocus->w_v, ff_refocus->single_scattering_ff, ff_refocus->mult_w_v_single_scattering);
// (wv * scattering matrix) .* wl
elementwise_mult<T2><<<elementwise_blocks_num,THREADS_NUM>>>(ff_refocus->mult_w_v_single_scattering, ff_refocus->w_l, total_elements * total_directions);
// sum((wv * scattering matrix) .* wl,2)
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, 1,
ff_refocus->mult_w_v_single_scattering, ff_refocus->ones_vector, ff_refocus->mult_res);
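// A sketch of what the three calls above compute, per element e (illustrative
// summary only, no extra work is done here):
//   mult_res[e] = sum_{d'} ( sum_{d} w_v[e][d] * S[d][d'] ) * w_l[e][d']
// i.e. a row-wise product w_v * S, an elementwise multiplication by w_l, and a
// sum over directions implemented as a multiplication with a ones vector.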
if(is_correlation)
{
// build the ff matrix
if(scattering_paramenter == 1)
{
singleScattering_kernel<T,T2,T3,randomScatter><<<blocks_num, THREADS_NUM>>>
(ff_refocus->single_scattering_ff,x0,(const randomScatter*) scatteringStruct,k,
ff_refocus->d_l_2,ff_refocus->d_v_2,total_directions);
}
if(scattering_paramenter == 2)
{
singleScattering_kernel<T,T2,T3,tabulatedScatter<T>><<<blocks_num, THREADS_NUM>>>
(ff_refocus->single_scattering_ff,x0,(tabulatedScatter<T>*) scatteringStruct,k,
ff_refocus->d_l_2,ff_refocus->d_v_2,total_directions);
}
if(scattering_paramenter == 3)
{
singleScattering_kernel<T,T2,T3,HGscatter<T>><<<blocks_num, THREADS_NUM>>>
(ff_refocus->single_scattering_ff,x0,(HGscatter<T>*) scatteringStruct,k,
ff_refocus->d_l_2,ff_refocus->d_v_2,total_directions);
}
// wv_2 * scattering matrix
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, total_directions,
ff_refocus->w_v_2, ff_refocus->single_scattering_ff, ff_refocus->mult_w_v_single_scattering);
// (wv_2 * scattering matrix) .* wl_2
elementwise_mult<T2><<<elementwise_blocks_num,THREADS_NUM>>>(ff_refocus->mult_w_v_single_scattering, ff_refocus->w_l_2, total_elements * total_directions);
// sum((wv_2 * scattering matrix) .* wl_2,2)
matrixMult(ff_refocus->cublas_handle, total_elements, total_directions, 1,
ff_refocus->mult_w_v_single_scattering, ff_refocus->ones_vector, ff_refocus->mult_res_2);
ub32 mult_blocks_num = (total_elements - 1) / THREADS_NUM + 1;
elementwise_conj_mult<T2><<<mult_blocks_num,THREADS_NUM>>>(ff_refocus->mult_res, ff_refocus->mult_res_2, total_elements);
}
matrixAdd(ff_refocus->cublas_handle_device_alpha, total_elements,
constPath, us, ff_refocus->mult_res);
}
template<typename T>
void singleScattering_setConstMem(constStructre<T> *constMem)
{
cudaMemcpyToSymbol(constStr_scat_single, constMem, sizeof(constStructre<T>));
}
template void singleScattering_setConstMem<double>(constStructre<double> *constMem);
template void singleScattering_setConstMem<float>(constStructre<float> *constMem);
template void singleScattering<double,double2,double3>(double2 *us, const double3 *x0,
ff_refocus_struct<double2,double3>* ff_refocus, const double2* constPath,
ub32 is_correlation, ub32 total_elements,
double k, ub32 scattering_paramenter, const void* scatteringStruct);
template void singleScattering<float,float2,float3>(float2 *us, const float3 *x0,
ff_refocus_struct<float2,float3>* ff_refocus, const float2* constPath,
ub32 is_correlation, ub32 total_elements,
float k, ub32 scattering_paramenter, const void* scatteringStruct);
|
gauge_fix_fft.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <unitarization_links.h>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
#include <hipfft.h>
#ifdef GPU_GAUGE_ALG
#include <CUFFT_Plans.h>
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
//Comment if you don't want to use textures for Delta(x) and g(x)
#define GAUGEFIXING_SITE_MATRIX_LOAD_TEX
//UNCOMMENT THIS IF YOU WANT TO USE LESS MEMORY
#define GAUGEFIXING_DONT_USE_GX
//Without using the precalculation of g(x),
//we lose some performance, because Delta(x) is written in the normal lattice coordinates needed for the FFTs
//while the gauge array is kept in even/odd format
#ifdef GAUGEFIXING_DONT_USE_GX
#warning Not using precalculated g(x)
#else
#warning Using precalculated g(x)
#endif
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
texture<float2, 1, hipReadModeElementType> GXTexSingle;
texture<int4, 1, hipReadModeElementType> GXTexDouble;
//Delta is only stored using 12 real number parameters,
// (0,0), (0,1), (0,2), (1,1), (1,2) and (2,2)
// (0,0), (1,1) and (2,2) don't have a real part, however we need a complex for the FFTs
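// Packing sketch (matches the loads/stores in the kernels below): component
// m = 0..5 of site idx lives at delta[idx + m * volume], and the lower
// triangle of the anti-Hermitian Delta is rebuilt on the fly as
// Delta(j,i) = -conj(Delta(i,j)), e.g.
//   de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);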
texture<float2, 1, hipReadModeElementType> DELTATexSingle;
texture<int4, 1, hipReadModeElementType> DELTATexDouble;
template <class T>
inline __device__ T TEXTURE_GX(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_GX<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(GXTexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_GX<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(GXTexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
template <class T>
inline __device__ T TEXTURE_DELTA(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_DELTA<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(DELTATexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_DELTA<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(DELTATexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
static void BindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipBindTexture(0, GXTexSingle, gx, bytes);
#endif
hipBindTexture(0, DELTATexSingle, delta, bytes);
#endif
}
static void BindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipBindTexture(0, GXTexDouble, gx, bytes);
#endif
hipBindTexture(0, DELTATexDouble, delta, bytes);
#endif
}
static void UnBindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipUnbindTexture(GXTexSingle);
#endif
hipUnbindTexture(DELTATexSingle);
#endif
}
static void UnBindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipUnbindTexture(GXTexDouble);
#endif
hipUnbindTexture(DELTATexDouble);
#endif
}
template <typename Cmplx>
struct GaugeFixFFTRotateArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Cmplx *tmp0;
Cmplx *tmp1;
GaugeFixFFTRotateArg(const cudaGaugeField &data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
tmp0 = 0;
tmp1 = 0;
}
};
template <int direction, typename Cmplx>
__global__ void fft_rotate_kernel_2D2D(GaugeFixFFTRotateArg<Cmplx> arg){ //Cmplx *data_in, Cmplx *data_out){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
if ( direction == 0 ) {
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
int id = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
int id_out = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
if ( direction == 1 ) {
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
int id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
int id_out = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
}
template<typename Cmplx>
class GaugeFixFFTRotate : Tunable {
GaugeFixFFTRotateArg<Cmplx> arg;
unsigned int direction;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixFFTRotate(GaugeFixFFTRotateArg<Cmplx> &arg)
: arg(arg) {
direction = 0;
}
~GaugeFixFFTRotate () {
}
void setDirection(unsigned int dir, Cmplx *data_in, Cmplx *data_out){
direction = dir;
arg.tmp0 = data_in;
arg.tmp1 = data_out;
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if ( direction == 0 )
fft_rotate_kernel_2D2D<0, Cmplx><< < tp.grid, tp.block, 0, stream >> > (arg);
else if ( direction == 1 )
fft_rotate_kernel_2D2D<1, Cmplx><< < tp.grid, tp.block, 0, stream >> > (arg);
else
errorQuda("Error in GaugeFixFFTRotate option.\n");
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Cmplx) / 2);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 0;
}
long long bytes() const {
return 2LL * sizeof(Cmplx) * arg.threads;
}
};
template <typename Cmplx, typename Gauge>
struct GaugeFixQualityArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
Gauge dataOr;
Cmplx *delta;
GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data, Cmplx * delta)
: ReduceArg<double2>(), dataOr(dataOr), delta(delta) {
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = data.VolumeCB();
}
double getAction(){ return result_h[0].x; }
double getTheta(){ return result_h[0].y; }
};
template<int blockSize, unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix_quality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
double2 data = make_double2(0.0,0.0);
if ( idx < argQ.threads ) {
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, idx, argQ.X, parity);
Matrix<Cmplx,3> delta;
setZero(&delta);
//idx = linkIndex(x,X);
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),idx, mu, parity);
delta -= U;
}
//18*gauge_dir
data.x = -delta(0,0).x - delta(1,1).x - delta(2,2).x;
//2
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,argQ.X,mu), mu, 1 - parity);
delta += U;
}
//18*gauge_dir
delta -= conj(delta);
//18
//SAVE DELTA!!!!!
SubTraceUnit(delta);
idx = getIndexFull(idx, argQ.X, parity);
//Saving Delta
argQ.delta[idx] = delta(0,0);
argQ.delta[idx + 2 * argQ.threads] = delta(0,1);
argQ.delta[idx + 4 * argQ.threads] = delta(0,2);
argQ.delta[idx + 6 * argQ.threads] = delta(1,1);
argQ.delta[idx + 8 * argQ.threads] = delta(1,2);
argQ.delta[idx + 10 * argQ.threads] = delta(2,2);
//12
data.y = getRealTraceUVdagger(delta, delta);
//35
//T=36*gauge_dir+65
}
reduce2d<blockSize,2>(argQ, data);
}
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
class GaugeFixQuality : TunableLocalParity {
GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int minThreads() const { return argQ.threads; }
public:
GaugeFixQuality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> &argQ)
: argQ(argQ) {
}
~GaugeFixQuality () { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
argQ.result_h[0] = make_double2(0.0,0.0);
LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Elems, Float, Gauge, gauge_dir);
hipDeviceSynchronize();
argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads);
argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << argQ.X[0] << "x" << argQ.X[1] << "x" << argQ.X[2] << "x" << argQ.X[3];
sprintf(aux_string,"threads=%d,prec=%d,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return (36LL * gauge_dir + 65LL) * 2 * argQ.threads;
} // Only correct if there is no link reconstruction; the cub reduction is also not accounted for
long long bytes() const {
return (2LL * gauge_dir + 2LL) * Elems * 2 * argQ.threads * sizeof(Float);
} //Not accounting for the reduction!!!
};
template <typename Float>
struct GaugeFixArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
cudaGaugeField &data;
Float *invpsq;
typename ComplexTypeId<Float>::Type *delta;
typename ComplexTypeId<Float>::Type *gx;
GaugeFixArg( cudaGaugeField & data, const unsigned int Elems) : data(data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
invpsq = (Float*)device_malloc(sizeof(Float) * threads);
delta = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * 6);
#ifdef GAUGEFIXING_DONT_USE_GX
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads);
#else
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
#endif
BindTex(delta, gx, sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
}
void free(){
UnBindTex(delta, gx);
device_free(invpsq);
device_free(delta);
device_free(gx);
}
};
template <typename Float>
__global__ void kernel_gauge_set_invpsq(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
//id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
Float sx = sin( (Float)x0 * FL_UNITARIZE_PI / (Float)arg.X[0]);
Float sy = sin( (Float)x1 * FL_UNITARIZE_PI / (Float)arg.X[1]);
Float sz = sin( (Float)x2 * FL_UNITARIZE_PI / (Float)arg.X[2]);
Float st = sin( (Float)x3 * FL_UNITARIZE_PI / (Float)arg.X[3]);
Float sinsq = sx * sx + sy * sy + sz * sz + st * st;
Float prcfact = 0.0;
//The FFT normalization is done here
if ( sinsq > 0.00001 ) prcfact = 4.0 / (sinsq * (Float)arg.threads);
arg.invpsq[id] = prcfact;
}
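// The factor stored above can be read as (illustrative rewriting of the code):
//   invpsq[p] = (pmax^2 / phat^2) / V,  phat^2 = 4 * sum_mu sin^2(pi * n_mu / L_mu)
// with pmax^2 = 16, where the 1/V absorbs the normalization of the unnormalized
// forward/backward FFT pair; the (near) zero mode is explicitly set to zero.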
template<typename Float>
class GaugeFixSETINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixSETINVPSP(GaugeFixArg<Float> &arg) : arg(arg) { }
~GaugeFixSETINVPSP () { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_set_invpsq<Float><< < tp.grid, tp.block, 0, stream >> > (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 21 * arg.threads;
}
long long bytes() const {
return sizeof(Float) * arg.threads;
}
};
template<typename Float>
__global__ void kernel_gauge_mult_norm_2D(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id < arg.threads ) arg.gx[id] = arg.gx[id] * arg.invpsq[id];
}
template<typename Float>
class GaugeFixINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixINVPSP(GaugeFixArg<Float> &arg)
: arg(arg){
hipFuncSetCacheConfig( kernel_gauge_mult_norm_2D<Float>, hipFuncCachePreferL1);
}
~GaugeFixINVPSP () {
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_mult_norm_2D<Float><< < tp.grid, tp.block, 0, stream >> > (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){
//since delta contents are irrelevant at this point, we can swap gx with delta
typename ComplexTypeId<Float>::Type *tmp = arg.gx;
arg.gx = arg.delta;
arg.delta = tmp;
}
void postTune(){
arg.gx = arg.delta;
}
long long flops() const {
return 2LL * arg.threads;
}
long long bytes() const {
return 5LL * sizeof(Float) * arg.threads;
}
};
template<typename Cmplx>
__device__ __host__ inline typename RealTypeId<Cmplx>::Type Abs2(const Cmplx & a){
return a.x * a.x + a.y * a.y;
}
template <typename Float>
__host__ __device__ inline void reunit_link( Matrix<typename ComplexTypeId<Float>::Type,3> &U ){
typedef typename ComplexTypeId<Float>::Type Cmplx;
Cmplx t2 = makeComplex((Float)0.0, (Float)0.0);
Float t1 = 0.0;
//first normalize first row
//sum of squares of row
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(0, c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(0,c) *= t1;
//6
#pragma unroll
for ( int c = 0; c < 3; c++ ) t2 += Conj(U(0,c)) * U(1,c);
//24
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c);
//24
//normalize second row
//sum of squares of row
t1 = 0.0;
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(1,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1, c) *= t1;
//6
//Reconstruct last row
U(2,0) = Conj(U(0,1) * U(1,2) - U(0,2) * U(1,1));
U(2,1) = Conj(U(0,2) * U(1,0) - U(0,0) * U(1,2));
U(2,2) = Conj(U(0,0) * U(1,1) - U(0,1) * U(1,0));
//42
//T=130
}
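// reunit_link() above is a Gram-Schmidt projection back onto SU(3):
// normalize row 0, orthogonalize row 1 against row 0 and normalize it, then
// rebuild row 2 as the conjugate cross product of the first two rows.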
#ifdef GAUGEFIXING_DONT_USE_GX
template <typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO_NEW( GaugeFixArg<Float> arg, Gauge dataOr, Float half_alpha){
int id = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
if ( id >= arg.threads/2 ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, id, arg.X, parity);
int idx = ((x[3] * arg.X[2] + x[2]) * arg.X[1] + x[1]) * arg.X[0] + x[0];
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
idx = linkNormalIndexP1(x,arg.X,mu);
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
setIdentity(&g0);
g0 += de * half_alpha;
//36
reunit_link<Float>( g0 );
//130
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
}
template<typename Float, typename Gauge>
class GaugeFixNEW : TunableLocalParity {
GaugeFixArg<Float> arg;
Float half_alpha;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
// since GaugeFixArg is used by other kernels that don't use
// tunableLocalParity, arg.threads stores Volume and not VolumeCB
// so we need to divide by two
unsigned int minThreads() const { return arg.threads/2; }
public:
GaugeFixNEW(Gauge & dataOr, GaugeFixArg<Float> &arg, Float alpha)
: dataOr(dataOr), arg(arg) {
half_alpha = alpha * 0.5;
hipFuncSetCacheConfig( kernel_gauge_fix_U_EO_NEW<Float, Gauge>, hipFuncCachePreferL1);
}
~GaugeFixNEW () { }
void setAlpha(Float alpha){ half_alpha = alpha * 0.5; }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_fix_U_EO_NEW<Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 2414LL * arg.threads;
//Not accounting here for the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return ( dataOr.Bytes() * 4LL + 5 * 12LL * sizeof(Float)) * arg.threads;
}
};
#else
template <unsigned int Elems, typename Float>
__global__ void kernel_gauge_GX(GaugeFixArg<Float> arg, Float half_alpha){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(id);
de(0,1) = TEXTURE_DELTA<Cmplx>(id + arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(id + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(id + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(id + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(id + 5 * arg.threads);
#else
de(0,0) = arg.delta[id];
de(0,1) = arg.delta[id + arg.threads];
de(0,2) = arg.delta[id + 2 * arg.threads];
de(1,1) = arg.delta[id + 3 * arg.threads];
de(1,2) = arg.delta[id + 4 * arg.threads];
de(2,2) = arg.delta[id + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
//gx is represented in even/odd order
//normal lattice index to even/odd index
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
id = (x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]) >> 1;
id += ((x0 + x1 + x2 + x3) & 1 ) * arg.threads / 2;
for ( int i = 0; i < Elems; i++ ) arg.gx[id + i * arg.threads] = g.data[i];
//T=166 for Elems 9
//T=208 for Elems 6
}
template<unsigned int Elems, typename Float>
class GaugeFix_GX : Tunable {
GaugeFixArg<Float> arg;
Float half_alpha;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix_GX(GaugeFixArg<Float> &arg, Float alpha)
: arg(arg) {
half_alpha = alpha * 0.5;
hipFuncSetCacheConfig( kernel_gauge_GX<Elems, Float>, hipFuncCachePreferL1);
}
~GaugeFix_GX () {
}
void setAlpha(Float alpha){
half_alpha = alpha * 0.5;
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_GX<Elems, Float><< < tp.grid, tp.block, 0, stream >> > (arg, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
if ( Elems == 6 ) return 208LL * arg.threads;
else return 166LL * arg.threads;
}
long long bytes() const {
return 4LL * Elems * sizeof(Float) * arg.threads;
}
};
template <unsigned int Elems, typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO( GaugeFixArg<Float> arg, Gauge dataOr){
int idd = threadIdx.x + blockIdx.x * blockDim.x;
if ( idd >= arg.threads ) return;
int parity = 0;
int id = idd;
if ( idd >= arg.threads / 2 ) {
parity = 1;
id -= arg.threads / 2;
}
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> g;
//for(int i = 0; i < Elems; i++) g.data[i] = arg.gx[idd + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g.data[i] = TEXTURE_GX<Cmplx>(idd + i * arg.threads);
#else
g.data[i] = arg.gx[idd + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g(2,0) = Conj(g(0,1) * g(1,2) - g(0,2) * g(1,1));
g(2,1) = Conj(g(0,2) * g(1,0) - g(0,0) * g(1,2));
g(2,2) = Conj(g(0,0) * g(1,1) - g(0,1) * g(1,0));
//42
}
int x[4];
getCoords(x, id, arg.X, parity);
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
int idm1 = linkIndexP1(x,arg.X,mu);
idm1 += (1 - parity) * arg.threads / 2;
//for(int i = 0; i < Elems; i++) g0.data[i] = arg.gx[idm1 + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g0.data[i] = TEXTURE_GX<Cmplx>(idm1 + i * arg.threads);
#else
g0.data[i] = arg.gx[idm1 + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g0(2,0) = Conj(g0(0,1) * g0(1,2) - g0(0,2) * g0(1,1));
g0(2,1) = Conj(g0(0,2) * g0(1,0) - g0(0,0) * g0(1,2));
g0(2,2) = Conj(g0(0,0) * g0(1,1) - g0(0,1) * g0(1,0));
//42
}
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
//T=42+4*(198*2+42) Elems=6
//T=4*(198*2) Elems=9
//Not accounting here for the reconstruction of the gauge if 12 or 8!!!!!!
}
template<unsigned int Elems, typename Float, typename Gauge>
class GaugeFix : Tunable {
GaugeFixArg<Float> arg;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix(Gauge & dataOr, GaugeFixArg<Float> &arg)
: dataOr(dataOr), arg(arg) {
hipFuncSetCacheConfig( kernel_gauge_fix_U_EO<Elems, Float, Gauge>, hipFuncCachePreferL1);
}
~GaugeFix () { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_fix_U_EO<Elems, Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
if ( Elems == 6 ) return 1794LL * arg.threads;
else return 1536LL * arg.threads;
//Not accounting here for the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return 26LL * Elems * sizeof(Float) * arg.threads;
}
};
#endif
//GAUGEFIXING_DONT_USE_GX
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, \
const unsigned int Nsteps, const unsigned int verbose_interval, \
const Float alpha0, const unsigned int autotune, const double tolerance, \
const unsigned int stopWtheta) {
TimeProfile profileInternalGaugeFixFFT("InternalGaugeFixQudaFFT", false);
profileInternalGaugeFixFFT.TPSTART(QUDA_PROFILE_COMPUTE);
Float alpha = alpha0;
std::cout << "\tAlpha parameter of the Steepest Descent Method: " << alpha << std::endl;
if ( autotune ) std::cout << "\tAuto tune active: yes" << std::endl;
else std::cout << "\tAuto tune active: no" << std::endl;
std::cout << "\tStop criterion: " << tolerance << std::endl;
if ( stopWtheta ) std::cout << "\tStop criterion method: theta" << std::endl;
else std::cout << "\tStop criterion method: Delta" << std::endl;
std::cout << "\tMaximum number of iterations: " << Nsteps << std::endl;
std::cout << "\tPrint convergence results every " << verbose_interval << " steps" << std::endl;
typedef typename ComplexTypeId<Float>::Type Cmplx;
unsigned int delta_pad = data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3];
int4 size = make_int4( data.X()[0], data.X()[1], data.X()[2], data.X()[3] );
hipfftHandle plan_xy;
hipfftHandle plan_zt;
GaugeFixArg<Float> arg(data, Elems);
SetPlanFFT2DMany( plan_zt, size, 0, arg.delta); //for space and time ZT
SetPlanFFT2DMany( plan_xy, size, 1, arg.delta); //with space only XY
GaugeFixFFTRotateArg<Cmplx> arg_rotate(data);
GaugeFixFFTRotate<Cmplx> GFRotate(arg_rotate);
GaugeFixSETINVPSP<Float> setinvpsp(arg);
setinvpsp.apply(0);
GaugeFixINVPSP<Float> invpsp(arg);
#ifdef GAUGEFIXING_DONT_USE_GX
//without using GX, gx will be created only for plane rotation but with less size
GaugeFixNEW<Float, Gauge> gfixNew(dataOr, arg, alpha);
#else
//using GX
GaugeFix_GX<Elems, Float> calcGX(arg, alpha);
GaugeFix<Elems, Float, Gauge> gfix(dataOr, arg);
#endif
GaugeFixQualityArg<Cmplx, Gauge> argQ(dataOr, data, arg.delta);
GaugeFixQuality<Elems, Float, Gauge, gauge_dir> gfixquality(argQ);
gfixquality.apply(0);
double action0 = argQ.getAction();
printf("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta());
double diff = 0.0;
int iter = 0;
for ( iter = 0; iter < Nsteps; iter++ ) {
for ( int k = 0; k < 6; k++ ) {
//------------------------------------------------------------------------
// Set a pointer to the element k in the lattice volume
// each element is stored with stride lattice volume
// it uses gx as temporary array!!!!!!
//------------------------------------------------------------------------
Cmplx *_array = arg.delta + k * delta_pad;
////// 2D FFT + 2D FFT
//------------------------------------------------------------------------
// Perform FFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, _array, arg.gx, HIPFFT_FORWARD);
//------------------------------------------------------------------------
// Rotate hypercube, xyzt -> ztxy
//------------------------------------------------------------------------
GFRotate.setDirection(0, arg.gx, _array);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform FFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, _array, arg.gx, HIPFFT_FORWARD);
//------------------------------------------------------------------------
// Normalize FFT and apply pmax^2/p^2
//------------------------------------------------------------------------
invpsp.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, arg.gx, _array, HIPFFT_BACKWARD);
//------------------------------------------------------------------------
// Rotate hypercube, ztxy -> xyzt
//------------------------------------------------------------------------
GFRotate.setDirection(1, _array, arg.gx);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, arg.gx, _array, HIPFFT_BACKWARD);
}
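//------------------------------------------------------------------------
// At this point arg.delta holds IFFT[ (pmax^2/p^2) * FFT[Delta] ] for all
// six stored components; the update applied below is, schematically,
//   g(x) = reunitarize( 1 + (alpha/2) * Delta_hat(x) )
// (an illustrative summary of the steepest-descent step, not extra code)
//------------------------------------------------------------------------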
#ifdef GAUGEFIXING_DONT_USE_GX
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfixNew.apply(0);
#else
//------------------------------------------------------------------------
// Calculate g(x)
//------------------------------------------------------------------------
calcGX.apply(0);
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfix.apply(0);
#endif
//------------------------------------------------------------------------
// Measure gauge quality and recalculate new Delta(x)
//------------------------------------------------------------------------
gfixquality.apply(0);
double action = argQ.getAction();
diff = abs(action0 - action);
if ((iter % verbose_interval) == (verbose_interval - 1))
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
if ( autotune && ((action - action0) < -1e-14) ) {
if ( alpha > 0.01 ) {
alpha = 0.95 * alpha;
#ifdef GAUGEFIXING_DONT_USE_GX
gfixNew.setAlpha(alpha);
#else
calcGX.setAlpha(alpha);
#endif
printf(">>>>>>>>>>>>>> Warning: changing alpha down -> %.4e\n", alpha );
}
}
//------------------------------------------------------------------------
// Check gauge fixing quality criterion
//------------------------------------------------------------------------
if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; }
else { if ( diff < tolerance ) break; }
action0 = action;
}
if ((iter % verbose_interval) != 0 )
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter, argQ.getAction(), argQ.getTheta(), diff);
// Reunitarize at end
const double unitarize_eps = 1e-14;
const double max_error = 1e-10;
const int reunit_allow_svd = 1;
const int reunit_svd_only = 0;
const double svd_rel_error = 1e-6;
const double svd_abs_error = 1e-6;
setUnitarizeLinksConstants(unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only,
svd_rel_error, svd_abs_error);
int num_failures = 0;
int* num_failures_dev;
hipMalloc((void**)&num_failures_dev, sizeof(int));
if ( num_failures_dev == NULL ) errorQuda("hipMalloc failed for dev_pointer\n");
hipMemset(num_failures_dev, 0, sizeof(int));
unitarizeLinksQuda(data, data, num_failures_dev);
hipMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost);
if ( num_failures > 0 ) {
hipFree(num_failures_dev);
errorQuda("Error in the unitarization\n");
exit(1);
}
hipFree(num_failures_dev);
// end reunitarize
arg.free();
CUFFT_SAFE_CALL(hipfftDestroy(plan_zt));
CUFFT_SAFE_CALL(hipfftDestroy(plan_xy));
checkCudaError();
hipDeviceSynchronize();
profileInternalGaugeFixFFT.TPSTOP(QUDA_PROFILE_COMPUTE);
if (getVerbosity() > QUDA_SUMMARIZE){
double secs = profileInternalGaugeFixFFT.Last(QUDA_PROFILE_COMPUTE);
double fftflop = 5.0 * (log2((double)( data.X()[0] * data.X()[1]) ) + log2( (double)(data.X()[2] * data.X()[3] )));
fftflop *= (double)( data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3] );
double gflops = setinvpsp.flops() + gfixquality.flops();
double gbytes = setinvpsp.bytes() + gfixquality.bytes();
double flop = invpsp.flops() * Elems;
double byte = invpsp.bytes() * Elems;
flop += (GFRotate.flops() + fftflop) * Elems * 2;
byte += GFRotate.bytes() * Elems * 4; //includes FFT reads, assuming 1 read and 1 write per site
#ifdef GAUGEFIXING_DONT_USE_GX
flop += gfixNew.flops();
byte += gfixNew.bytes();
#else
flop += calcGX.flops();
byte += calcGX.bytes();
flop += gfix.flops();
byte += gfix.bytes();
#endif
flop += gfixquality.flops();
byte += gfixquality.bytes();
gflops += flop * iter;
gbytes += byte * iter;
gflops += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; //Reunitarize at end
gbytes += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes() ; //Reunitarize at end
gflops = (gflops * 1e-9) / (secs);
gbytes = gbytes / (secs * 1e9);
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
}
}
template<unsigned int Elems, typename Float, typename Gauge>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
if ( gauge_dir != 3 ) {
printf("Starting Landau gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 4>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
else {
printf("Starting Coulomb gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 3>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
}
template<typename Float>
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
// Need to fix this!!
//9 and 6 means the number of complex elements used to store g(x) and Delta(x)
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
gaugefixingFFT<9, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
//printfQuda("QUDA_RECONSTRUCT_12\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
//printfQuda("QUDA_RECONSTRUCT_8\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/**
* @brief Gauge fixing with Steepest descent method with FFTs with support for single GPU only.
* @param[in,out] data, quda gauge field
* @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing
* @param[in] Nsteps, maximum number of steps to perform gauge fixing
* @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this
* @param[in] alpha, gauge fixing parameter of the method, most common value is 0.08
* @param[in] autotune, 1 to autotune the method, i.e., if the Fg inverts its tendency we decrease the alpha value
 * @param[in] tolerance, tolerance value to stop the method; if this value is zero then the method stops when the iteration reaches the maximum number of steps defined by Nsteps
 * @param[in] stopWtheta, 0 for the MILC criterion and 1 to use the theta value
*/
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const double alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
#ifdef GPU_GAUGE_ALG
#ifdef MULTI_GPU
if(comm_dim_partitioned(0) || comm_dim_partitioned(1) || comm_dim_partitioned(2) || comm_dim_partitioned(3))
errorQuda("Gauge Fixing with FFTs in multi-GPU support NOT implemented yet!\n");
#endif
if ( data.Precision() == QUDA_HALF_PRECISION ) {
errorQuda("Half precision not supported\n");
}
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
gaugefixingFFT<float> (data, gauge_dir, Nsteps, verbose_interval, (float)alpha, autotune, tolerance, stopWtheta);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
gaugefixingFFT<double>(data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge fixing has bot been built");
#endif
}
}
| gauge_fix_fft.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <unitarization_links.h>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
#include <cufft.h>
#ifdef GPU_GAUGE_ALG
#include <CUFFT_Plans.h>
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
//Comment if you don't want to use textures for Delta(x) and g(x)
#define GAUGEFIXING_SITE_MATRIX_LOAD_TEX
  //UNCOMMENT THIS IF YOU WANT TO USE LESS MEMORY
#define GAUGEFIXING_DONT_USE_GX
  //Without using the precalculation of g(x),
  //we lose some performance, because Delta(x) is written in the normal lattice coordinates needed for the FFTs
//and the gauge array in even/odd format
#ifdef GAUGEFIXING_DONT_USE_GX
#warning Don't use precalculated g(x)
#else
#warning Using precalculated g(x)
#endif
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
texture<float2, 1, cudaReadModeElementType> GXTexSingle;
texture<int4, 1, cudaReadModeElementType> GXTexDouble;
//Delta is only stored using 12 real number parameters,
// (0,0), (0,1), (0,2), (1,1), (1,2) and (2,2)
// (0,0), (1,1) and (0,1) don't have real part, however we need a complex for the FFTs
texture<float2, 1, cudaReadModeElementType> DELTATexSingle;
texture<int4, 1, cudaReadModeElementType> DELTATexDouble;
template <class T>
inline __device__ T TEXTURE_GX(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_GX<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(GXTexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_GX<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(GXTexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
template <class T>
inline __device__ T TEXTURE_DELTA(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_DELTA<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(DELTATexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_DELTA<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(DELTATexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
static void BindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaBindTexture(0, GXTexSingle, gx, bytes);
#endif
cudaBindTexture(0, DELTATexSingle, delta, bytes);
#endif
}
static void BindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaBindTexture(0, GXTexDouble, gx, bytes);
#endif
cudaBindTexture(0, DELTATexDouble, delta, bytes);
#endif
}
static void UnBindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaUnbindTexture(GXTexSingle);
#endif
cudaUnbindTexture(DELTATexSingle);
#endif
}
static void UnBindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaUnbindTexture(GXTexDouble);
#endif
cudaUnbindTexture(DELTATexDouble);
#endif
}
template <typename Cmplx>
struct GaugeFixFFTRotateArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Cmplx *tmp0;
Cmplx *tmp1;
GaugeFixFFTRotateArg(const cudaGaugeField &data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
tmp0 = 0;
tmp1 = 0;
}
};
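  // Reorders the 4D volume between an xyzt-major and a ztxy-major layout so that
  // each of the two batched 2D FFTs (over xy planes and over zt planes) operates on
  // contiguous data: direction == 0 maps xyzt -> ztxy, direction == 1 maps back.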
template <int direction, typename Cmplx>
__global__ void fft_rotate_kernel_2D2D(GaugeFixFFTRotateArg<Cmplx> arg){ //Cmplx *data_in, Cmplx *data_out){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
if ( direction == 0 ) {
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
int id = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
int id_out = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
if ( direction == 1 ) {
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
int id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
int id_out = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
}
template<typename Cmplx>
class GaugeFixFFTRotate : Tunable {
GaugeFixFFTRotateArg<Cmplx> arg;
unsigned int direction;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixFFTRotate(GaugeFixFFTRotateArg<Cmplx> &arg)
: arg(arg) {
direction = 0;
}
~GaugeFixFFTRotate () {
}
void setDirection(unsigned int dir, Cmplx *data_in, Cmplx *data_out){
direction = dir;
arg.tmp0 = data_in;
arg.tmp1 = data_out;
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if ( direction == 0 )
        fft_rotate_kernel_2D2D<0, Cmplx><<< tp.grid, tp.block, 0, stream >>> (arg);
else if ( direction == 1 )
        fft_rotate_kernel_2D2D<1, Cmplx><<< tp.grid, tp.block, 0, stream >>> (arg);
else
errorQuda("Error in GaugeFixFFTRotate option.\n");
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Cmplx) / 2);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 0;
}
long long bytes() const {
return 2LL * sizeof(Cmplx) * arg.threads;
}
};
template <typename Cmplx, typename Gauge>
struct GaugeFixQualityArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
Gauge dataOr;
Cmplx *delta;
GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data, Cmplx * delta)
: ReduceArg<double2>(), dataOr(dataOr), delta(delta) {
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = data.VolumeCB();
}
double getAction(){ return result_h[0].x; }
double getTheta(){ return result_h[0].y; }
};
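  // One thread per even/odd site: reduces the gauge-fixing functional (data.x) and
  // the theta measure (data.y), and stores the traceless anti-hermitian Delta(x)
  // built from sum_mu [ U_mu(x-mu) - U_mu(x) ] in normal lattice order, where it
  // becomes the source for the FFT step.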
template<int blockSize, unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix_quality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
double2 data = make_double2(0.0,0.0);
if ( idx < argQ.threads ) {
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, idx, argQ.X, parity);
Matrix<Cmplx,3> delta;
setZero(&delta);
//idx = linkIndex(x,X);
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),idx, mu, parity);
delta -= U;
}
//18*gauge_dir
data.x = -delta(0,0).x - delta(1,1).x - delta(2,2).x;
//2
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,argQ.X,mu), mu, 1 - parity);
delta += U;
}
//18*gauge_dir
delta -= conj(delta);
//18
//SAVE DELTA!!!!!
SubTraceUnit(delta);
idx = getIndexFull(idx, argQ.X, parity);
//Saving Delta
argQ.delta[idx] = delta(0,0);
argQ.delta[idx + 2 * argQ.threads] = delta(0,1);
argQ.delta[idx + 4 * argQ.threads] = delta(0,2);
argQ.delta[idx + 6 * argQ.threads] = delta(1,1);
argQ.delta[idx + 8 * argQ.threads] = delta(1,2);
argQ.delta[idx + 10 * argQ.threads] = delta(2,2);
//12
data.y = getRealTraceUVdagger(delta, delta);
//35
//T=36*gauge_dir+65
}
reduce2d<blockSize,2>(argQ, data);
}
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
class GaugeFixQuality : TunableLocalParity {
GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int minThreads() const { return argQ.threads; }
public:
GaugeFixQuality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> &argQ)
: argQ(argQ) {
}
~GaugeFixQuality () { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
argQ.result_h[0] = make_double2(0.0,0.0);
LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Elems, Float, Gauge, gauge_dir);
cudaDeviceSynchronize();
argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads);
argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << argQ.X[0] << "x" << argQ.X[1] << "x" << argQ.X[2] << "x" << argQ.X[3];
sprintf(aux_string,"threads=%d,prec=%d,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return (36LL * gauge_dir + 65LL) * 2 * argQ.threads;
} // Only correct if there is no link reconstruction, no cub reduction accounted also
long long bytes() const {
return (2LL * gauge_dir + 2LL) * Elems * 2 * argQ.threads * sizeof(Float);
} //Not accounting the reduction!!!
};
template <typename Float>
struct GaugeFixArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
cudaGaugeField &data;
Float *invpsq;
typename ComplexTypeId<Float>::Type *delta;
typename ComplexTypeId<Float>::Type *gx;
GaugeFixArg( cudaGaugeField & data, const unsigned int Elems) : data(data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
invpsq = (Float*)device_malloc(sizeof(Float) * threads);
delta = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * 6);
#ifdef GAUGEFIXING_DONT_USE_GX
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads);
#else
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
#endif
BindTex(delta, gx, sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
}
void free(){
UnBindTex(delta, gx);
device_free(invpsq);
device_free(delta);
device_free(gx);
}
};
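  // Precomputes the Fourier-acceleration weight applied to Delta(p): up to the 1/V
  // normalization of the forward+inverse FFT pair this is p_hat^2_max / p_hat^2 with
  // the lattice momentum p_hat^2 = 4 * sum_mu sin^2(pi n_mu / N_mu); the zero mode
  // (p_hat^2 ~ 0) is set to zero.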
template <typename Float>
__global__ void kernel_gauge_set_invpsq(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
//id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
Float sx = sin( (Float)x0 * FL_UNITARIZE_PI / (Float)arg.X[0]);
Float sy = sin( (Float)x1 * FL_UNITARIZE_PI / (Float)arg.X[1]);
Float sz = sin( (Float)x2 * FL_UNITARIZE_PI / (Float)arg.X[2]);
Float st = sin( (Float)x3 * FL_UNITARIZE_PI / (Float)arg.X[3]);
Float sinsq = sx * sx + sy * sy + sz * sz + st * st;
Float prcfact = 0.0;
//The FFT normalization is done here
if ( sinsq > 0.00001 ) prcfact = 4.0 / (sinsq * (Float)arg.threads);
arg.invpsq[id] = prcfact;
}
template<typename Float>
class GaugeFixSETINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixSETINVPSP(GaugeFixArg<Float> &arg) : arg(arg) { }
~GaugeFixSETINVPSP () { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      kernel_gauge_set_invpsq<Float><<< tp.grid, tp.block, 0, stream >>> (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 21 * arg.threads;
}
long long bytes() const {
return sizeof(Float) * arg.threads;
}
};
template<typename Float>
__global__ void kernel_gauge_mult_norm_2D(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id < arg.threads ) arg.gx[id] = arg.gx[id] * arg.invpsq[id];
}
template<typename Float>
class GaugeFixINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixINVPSP(GaugeFixArg<Float> &arg)
: arg(arg){
cudaFuncSetCacheConfig( kernel_gauge_mult_norm_2D<Float>, cudaFuncCachePreferL1);
}
~GaugeFixINVPSP () {
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      kernel_gauge_mult_norm_2D<Float><<< tp.grid, tp.block, 0, stream >>> (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){
//since delta contents are irrelevant at this point, we can swap gx with delta
typename ComplexTypeId<Float>::Type *tmp = arg.gx;
arg.gx = arg.delta;
arg.delta = tmp;
}
void postTune(){
arg.gx = arg.delta;
}
long long flops() const {
return 2LL * arg.threads;
}
long long bytes() const {
return 5LL * sizeof(Float) * arg.threads;
}
};
template<typename Cmplx>
__device__ __host__ inline typename RealTypeId<Cmplx>::Type Abs2(const Cmplx & a){
return a.x * a.x + a.y * a.y;
}
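  // Projects a 3x3 complex matrix back onto SU(3) by modified Gram-Schmidt:
  // normalize the first row, orthogonalize and normalize the second row against it,
  // then rebuild the third row as the conjugate cross product of the first two.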
template <typename Float>
__host__ __device__ inline void reunit_link( Matrix<typename ComplexTypeId<Float>::Type,3> &U ){
typedef typename ComplexTypeId<Float>::Type Cmplx;
Cmplx t2 = makeComplex((Float)0.0, (Float)0.0);
Float t1 = 0.0;
//first normalize first row
//sum of squares of row
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(0, c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(0,c) *= t1;
//6
#pragma unroll
for ( int c = 0; c < 3; c++ ) t2 += Conj(U(0,c)) * U(1,c);
//24
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c);
//24
//normalize second row
//sum of squares of row
t1 = 0.0;
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(1,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1, c) *= t1;
//6
    //Reconstruct last row
U(2,0) = Conj(U(0,1) * U(1,2) - U(0,2) * U(1,1));
U(2,1) = Conj(U(0,2) * U(1,0) - U(0,0) * U(1,2));
U(2,2) = Conj(U(0,0) * U(1,1) - U(0,1) * U(1,0));
//42
//T=130
}
#ifdef GAUGEFIXING_DONT_USE_GX
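  // Builds g(x) = reunitarize( 1 + (alpha/2) * Delta(x) ) on the fly from the stored
  // Delta field and applies the gauge rotation U_mu(x) -> g(x) U_mu(x) g(x+mu)^dagger,
  // so no full g(x) array has to be kept in memory.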
template <typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO_NEW( GaugeFixArg<Float> arg, Gauge dataOr, Float half_alpha){
int id = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
if ( id >= arg.threads/2 ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, id, arg.X, parity);
int idx = ((x[3] * arg.X[2] + x[2]) * arg.X[1] + x[1]) * arg.X[0] + x[0];
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
idx = linkNormalIndexP1(x,arg.X,mu);
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
setIdentity(&g0);
g0 += de * half_alpha;
//36
reunit_link<Float>( g0 );
//130
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
}
template<typename Float, typename Gauge>
class GaugeFixNEW : TunableLocalParity {
GaugeFixArg<Float> arg;
Float half_alpha;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
// since GaugeFixArg is used by other kernels that don't use
// tunableLocalParity, arg.threads stores Volume and not VolumeCB
// so we need to divide by two
unsigned int minThreads() const { return arg.threads/2; }
public:
GaugeFixNEW(Gauge & dataOr, GaugeFixArg<Float> &arg, Float alpha)
: dataOr(dataOr), arg(arg) {
half_alpha = alpha * 0.5;
cudaFuncSetCacheConfig( kernel_gauge_fix_U_EO_NEW<Float, Gauge>, cudaFuncCachePreferL1);
}
~GaugeFixNEW () { }
void setAlpha(Float alpha){ half_alpha = alpha * 0.5; }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      kernel_gauge_fix_U_EO_NEW<Float, Gauge><<< tp.grid, tp.block, 0, stream >>> (arg, dataOr, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 2414LL * arg.threads;
//Not accounting here the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return ( dataOr.Bytes() * 4LL + 5 * 12LL * sizeof(Float)) * arg.threads;
}
};
#else
template <unsigned int Elems, typename Float>
__global__ void kernel_gauge_GX(GaugeFixArg<Float> arg, Float half_alpha){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(id);
de(0,1) = TEXTURE_DELTA<Cmplx>(id + arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(id + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(id + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(id + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(id + 5 * arg.threads);
#else
de(0,0) = arg.delta[id];
de(0,1) = arg.delta[id + arg.threads];
de(0,2) = arg.delta[id + 2 * arg.threads];
de(1,1) = arg.delta[id + 3 * arg.threads];
de(1,2) = arg.delta[id + 4 * arg.threads];
de(2,2) = arg.delta[id + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
//gx is represented in even/odd order
//normal lattice index to even/odd index
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
id = (x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]) >> 1;
id += ((x0 + x1 + x2 + x3) & 1 ) * arg.threads / 2;
for ( int i = 0; i < Elems; i++ ) arg.gx[id + i * arg.threads] = g.data[i];
//T=166 for Elems 9
//T=208 for Elems 6
}
template<unsigned int Elems, typename Float>
class GaugeFix_GX : Tunable {
GaugeFixArg<Float> arg;
Float half_alpha;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix_GX(GaugeFixArg<Float> &arg, Float alpha)
: arg(arg) {
half_alpha = alpha * 0.5;
cudaFuncSetCacheConfig( kernel_gauge_GX<Elems, Float>, cudaFuncCachePreferL1);
}
~GaugeFix_GX () {
}
void setAlpha(Float alpha){
half_alpha = alpha * 0.5;
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      kernel_gauge_GX<Elems, Float><<< tp.grid, tp.block, 0, stream >>> (arg, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
if ( Elems == 6 ) return 208LL * arg.threads;
else return 166LL * arg.threads;
}
long long bytes() const {
return 4LL * Elems * sizeof(Float) * arg.threads;
}
};
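  // Variant that consumes the precomputed g(x) array (stored in even/odd order).
  // With Elems == 6 only the first two rows of g are stored, so the third row is
  // reconstructed before applying U_mu(x) -> g(x) U_mu(x) g(x+mu)^dagger.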
template <unsigned int Elems, typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO( GaugeFixArg<Float> arg, Gauge dataOr){
int idd = threadIdx.x + blockIdx.x * blockDim.x;
if ( idd >= arg.threads ) return;
int parity = 0;
int id = idd;
if ( idd >= arg.threads / 2 ) {
parity = 1;
id -= arg.threads / 2;
}
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> g;
//for(int i = 0; i < Elems; i++) g.data[i] = arg.gx[idd + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g.data[i] = TEXTURE_GX<Cmplx>(idd + i * arg.threads);
#else
g.data[i] = arg.gx[idd + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g(2,0) = Conj(g(0,1) * g(1,2) - g(0,2) * g(1,1));
g(2,1) = Conj(g(0,2) * g(1,0) - g(0,0) * g(1,2));
g(2,2) = Conj(g(0,0) * g(1,1) - g(0,1) * g(1,0));
//42
}
int x[4];
getCoords(x, id, arg.X, parity);
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
int idm1 = linkIndexP1(x,arg.X,mu);
idm1 += (1 - parity) * arg.threads / 2;
//for(int i = 0; i < Elems; i++) g0.data[i] = arg.gx[idm1 + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g0.data[i] = TEXTURE_GX<Cmplx>(idm1 + i * arg.threads);
#else
g0.data[i] = arg.gx[idm1 + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g0(2,0) = Conj(g0(0,1) * g0(1,2) - g0(0,2) * g0(1,1));
g0(2,1) = Conj(g0(0,2) * g0(1,0) - g0(0,0) * g0(1,2));
g0(2,2) = Conj(g0(0,0) * g0(1,1) - g0(0,1) * g0(1,0));
//42
}
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
//T=42+4*(198*2+42) Elems=6
//T=4*(198*2) Elems=9
//Not accounting here the reconstruction of the gauge if 12 or 8!!!!!!
}
template<unsigned int Elems, typename Float, typename Gauge>
class GaugeFix : Tunable {
GaugeFixArg<Float> arg;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix(Gauge & dataOr, GaugeFixArg<Float> &arg)
: dataOr(dataOr), arg(arg) {
cudaFuncSetCacheConfig( kernel_gauge_fix_U_EO<Elems, Float, Gauge>, cudaFuncCachePreferL1);
}
~GaugeFix () { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      kernel_gauge_fix_U_EO<Elems, Float, Gauge><<< tp.grid, tp.block, 0, stream >>> (arg, dataOr);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
if ( Elems == 6 ) return 1794LL * arg.threads;
else return 1536LL * arg.threads;
//Not accounting here the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return 26LL * Elems * sizeof(Float) * arg.threads;
}
};
#endif
//GAUGEFIXING_DONT_USE_GX
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, \
const unsigned int Nsteps, const unsigned int verbose_interval, \
const Float alpha0, const unsigned int autotune, const double tolerance, \
const unsigned int stopWtheta) {
TimeProfile profileInternalGaugeFixFFT("InternalGaugeFixQudaFFT", false);
profileInternalGaugeFixFFT.TPSTART(QUDA_PROFILE_COMPUTE);
Float alpha = alpha0;
std::cout << "\tAlpha parameter of the Steepest Descent Method: " << alpha << std::endl;
if ( autotune ) std::cout << "\tAuto tune active: yes" << std::endl;
else std::cout << "\tAuto tune active: no" << std::endl;
std::cout << "\tStop criterium: " << tolerance << std::endl;
if ( stopWtheta ) std::cout << "\tStop criterium method: theta" << std::endl;
else std::cout << "\tStop criterium method: Delta" << std::endl;
std::cout << "\tMaximum number of iterations: " << Nsteps << std::endl;
std::cout << "\tPrint convergence results at every " << verbose_interval << " steps" << std::endl;
typedef typename ComplexTypeId<Float>::Type Cmplx;
unsigned int delta_pad = data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3];
int4 size = make_int4( data.X()[0], data.X()[1], data.X()[2], data.X()[3] );
cufftHandle plan_xy;
cufftHandle plan_zt;
GaugeFixArg<Float> arg(data, Elems);
SetPlanFFT2DMany( plan_zt, size, 0, arg.delta); //for space and time ZT
SetPlanFFT2DMany( plan_xy, size, 1, arg.delta); //with space only XY
GaugeFixFFTRotateArg<Cmplx> arg_rotate(data);
GaugeFixFFTRotate<Cmplx> GFRotate(arg_rotate);
GaugeFixSETINVPSP<Float> setinvpsp(arg);
setinvpsp.apply(0);
GaugeFixINVPSP<Float> invpsp(arg);
#ifdef GAUGEFIXING_DONT_USE_GX
//without using GX, gx will be created only for plane rotation but with less size
GaugeFixNEW<Float, Gauge> gfixNew(dataOr, arg, alpha);
#else
//using GX
GaugeFix_GX<Elems, Float> calcGX(arg, alpha);
GaugeFix<Elems, Float, Gauge> gfix(dataOr, arg);
#endif
GaugeFixQualityArg<Cmplx, Gauge> argQ(dataOr, data, arg.delta);
GaugeFixQuality<Elems, Float, Gauge, gauge_dir> gfixquality(argQ);
gfixquality.apply(0);
double action0 = argQ.getAction();
printf("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta());
double diff = 0.0;
int iter = 0;
for ( iter = 0; iter < Nsteps; iter++ ) {
for ( int k = 0; k < 6; k++ ) {
//------------------------------------------------------------------------
// Set a pointer do the element k in lattice volume
// each element is stored with stride lattice volume
// it uses gx as temporary array!!!!!!
//------------------------------------------------------------------------
Cmplx *_array = arg.delta + k * delta_pad;
////// 2D FFT + 2D FFT
//------------------------------------------------------------------------
// Perform FFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, _array, arg.gx, CUFFT_FORWARD);
//------------------------------------------------------------------------
// Rotate hypercube, xyzt -> ztxy
//------------------------------------------------------------------------
GFRotate.setDirection(0, arg.gx, _array);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform FFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, _array, arg.gx, CUFFT_FORWARD);
//------------------------------------------------------------------------
// Normalize FFT and apply pmax^2/p^2
//------------------------------------------------------------------------
invpsp.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, arg.gx, _array, CUFFT_INVERSE);
//------------------------------------------------------------------------
// Rotate hypercube, ztxy -> xyzt
//------------------------------------------------------------------------
GFRotate.setDirection(1, _array, arg.gx);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, arg.gx, _array, CUFFT_INVERSE);
}
#ifdef GAUGEFIXING_DONT_USE_GX
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfixNew.apply(0);
#else
//------------------------------------------------------------------------
// Calculate g(x)
//------------------------------------------------------------------------
calcGX.apply(0);
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfix.apply(0);
#endif
//------------------------------------------------------------------------
// Measure gauge quality and recalculate new Delta(x)
//------------------------------------------------------------------------
gfixquality.apply(0);
double action = argQ.getAction();
diff = abs(action0 - action);
if ((iter % verbose_interval) == (verbose_interval - 1))
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
if ( autotune && ((action - action0) < -1e-14) ) {
if ( alpha > 0.01 ) {
alpha = 0.95 * alpha;
#ifdef GAUGEFIXING_DONT_USE_GX
gfixNew.setAlpha(alpha);
#else
calcGX.setAlpha(alpha);
#endif
printf(">>>>>>>>>>>>>> Warning: changing alpha down -> %.4e\n", alpha );
}
}
//------------------------------------------------------------------------
// Check gauge fix quality criterium
//------------------------------------------------------------------------
if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; }
else { if ( diff < tolerance ) break; }
action0 = action;
}
if ((iter % verbose_interval) != 0 )
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter, argQ.getAction(), argQ.getTheta(), diff);
// Reunitarize at end
const double unitarize_eps = 1e-14;
const double max_error = 1e-10;
const int reunit_allow_svd = 1;
const int reunit_svd_only = 0;
const double svd_rel_error = 1e-6;
const double svd_abs_error = 1e-6;
setUnitarizeLinksConstants(unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only,
svd_rel_error, svd_abs_error);
int num_failures = 0;
int* num_failures_dev;
    cudaMalloc((void**)&num_failures_dev, sizeof(int));
    if ( num_failures_dev == NULL ) errorQuda("cudaMalloc failed for dev_pointer\n");
    cudaMemset(num_failures_dev, 0, sizeof(int));
unitarizeLinksQuda(data, data, num_failures_dev);
cudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost);
if ( num_failures > 0 ) {
cudaFree(num_failures_dev);
errorQuda("Error in the unitarization\n");
exit(1);
}
cudaFree(num_failures_dev);
// end reunitarize
arg.free();
CUFFT_SAFE_CALL(cufftDestroy(plan_zt));
CUFFT_SAFE_CALL(cufftDestroy(plan_xy));
checkCudaError();
cudaDeviceSynchronize();
profileInternalGaugeFixFFT.TPSTOP(QUDA_PROFILE_COMPUTE);
if (getVerbosity() > QUDA_SUMMARIZE){
double secs = profileInternalGaugeFixFFT.Last(QUDA_PROFILE_COMPUTE);
double fftflop = 5.0 * (log2((double)( data.X()[0] * data.X()[1]) ) + log2( (double)(data.X()[2] * data.X()[3] )));
fftflop *= (double)( data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3] );
double gflops = setinvpsp.flops() + gfixquality.flops();
double gbytes = setinvpsp.bytes() + gfixquality.bytes();
double flop = invpsp.flops() * Elems;
double byte = invpsp.bytes() * Elems;
flop += (GFRotate.flops() + fftflop) * Elems * 2;
byte += GFRotate.bytes() * Elems * 4; //includes FFT reads, assuming 1 read and 1 write per site
#ifdef GAUGEFIXING_DONT_USE_GX
flop += gfixNew.flops();
byte += gfixNew.bytes();
#else
flop += calcGX.flops();
byte += calcGX.bytes();
flop += gfix.flops();
byte += gfix.bytes();
#endif
flop += gfixquality.flops();
byte += gfixquality.bytes();
gflops += flop * iter;
gbytes += byte * iter;
gflops += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; //Reunitarize at end
gbytes += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes() ; //Reunitarize at end
gflops = (gflops * 1e-9) / (secs);
gbytes = gbytes / (secs * 1e9);
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
}
}
template<unsigned int Elems, typename Float, typename Gauge>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
if ( gauge_dir != 3 ) {
printf("Starting Landau gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 4>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
else {
printf("Starting Coulomb gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 3>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
}
template<typename Float>
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
// Need to fix this!!
//9 and 6 means the number of complex elements used to store g(x) and Delta(x)
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
gaugefixingFFT<9, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
//printfQuda("QUDA_RECONSTRUCT_12\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
//printfQuda("QUDA_RECONSTRUCT_8\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/**
* @brief Gauge fixing with Steepest descent method with FFTs with support for single GPU only.
* @param[in,out] data, quda gauge field
* @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing
* @param[in] Nsteps, maximum number of steps to perform gauge fixing
* @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this
* @param[in] alpha, gauge fixing parameter of the method, most common value is 0.08
* @param[in] autotune, 1 to autotune the method, i.e., if the Fg inverts its tendency we decrease the alpha value
 * @param[in] tolerance, tolerance value to stop the method; if this value is zero then the method stops when the iteration reaches the maximum number of steps defined by Nsteps
 * @param[in] stopWtheta, 0 for the MILC criterion and 1 to use the theta value
*/
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const double alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
#ifdef GPU_GAUGE_ALG
#ifdef MULTI_GPU
if(comm_dim_partitioned(0) || comm_dim_partitioned(1) || comm_dim_partitioned(2) || comm_dim_partitioned(3))
errorQuda("Gauge Fixing with FFTs in multi-GPU support NOT implemented yet!\n");
#endif
if ( data.Precision() == QUDA_HALF_PRECISION ) {
errorQuda("Half precision not supported\n");
}
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
gaugefixingFFT<float> (data, gauge_dir, Nsteps, verbose_interval, (float)alpha, autotune, tolerance, stopWtheta);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
gaugefixingFFT<double>(data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge fixing has bot been built");
#endif
}
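  /* Illustrative call sequence (a sketch only; the field name and the parameter
     values below are hypothetical and not taken from this file). Assuming a
     device-resident cudaGaugeField `gauge`, Coulomb gauge fixing with the
     commonly used alpha = 0.08 would look like:

         const unsigned int gauge_dir = 3;        // 3 = Coulomb, any other value = Landau
         const unsigned int Nsteps = 10000;
         const unsigned int verbose_interval = 100;
         const double alpha = 0.08;
         const unsigned int autotune = 1;
         const double tolerance = 1e-15;
         const unsigned int stopWtheta = 0;       // 0 = Delta criterion, 1 = theta
         gaugefixingFFT(gauge, gauge_dir, Nsteps, verbose_interval,
                        alpha, autotune, tolerance, stopWtheta);
  */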
}
|
0bcc2736ba64abc1d95560f2fbfe18b38769941c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.hpp"
#include <stdlib.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char **argv) {
// read arguments
wbArg_t args = wbArg_read(argc, argv);
// read input data
wbTime_start(Generic, "Importing data and creating memory on host");
int inputLength;
float *hostInput1 = wbImport(wbArg_getInputFile(args, 0), &inputLength);
float *hostInput2 = wbImport(wbArg_getInputFile(args, 1), &inputLength);
float *hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
// allocate GPU memory
wbTime_start(GPU, "Allocating GPU memory");
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
hipMalloc(&deviceInput1, inputLength * sizeof(float));
hipMalloc(&deviceInput2, inputLength * sizeof(float));
hipMalloc(&deviceOutput, inputLength * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory");
// copy memory to the GPU
wbTime_start(GPU, "Copying input memory to the GPU");
hipMemcpy(deviceInput1, hostInput1, inputLength * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(deviceInput2, hostInput2, inputLength * sizeof(float),
hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU");
// initialize grid and block dimensions
// hard to decide which execution configuration should be chosen
// as long as vector size is less than 1024, we can use a single block
int numBlocks = 1;
int threadsPerBlock = inputLength;
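  // Alternative sketch (not used here): for inputs longer than 1024 elements one
  // would normally fix the block size and derive the grid size, e.g.
  //   int threadsPerBlock = 256;
  //   int numBlocks = (inputLength + threadsPerBlock - 1) / threadsPerBlock;
  // the bounds check inside vecAdd already makes this safe.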
// launch GPU kernel
wbTime_start(Compute, "Performing CUDA computation");
hipLaunchKernelGGL(( vecAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, deviceInput1, deviceInput2,
deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
// copy GPU memory back to the CPU
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostOutput, deviceOutput, inputLength * sizeof(float),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
// free GPU memory
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceInput1);
hipFree(deviceInput2);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
// check solution
wbSolution(args, hostOutput, inputLength);
// free host memory
free(hostInput1);
free(hostInput2);
free(hostOutput);
return EXIT_SUCCESS;
}
| 0bcc2736ba64abc1d95560f2fbfe18b38769941c.cu | #include "wb.hpp"
#include <stdlib.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char **argv) {
// read arguments
wbArg_t args = wbArg_read(argc, argv);
// read input data
wbTime_start(Generic, "Importing data and creating memory on host");
int inputLength;
float *hostInput1 = wbImport(wbArg_getInputFile(args, 0), &inputLength);
float *hostInput2 = wbImport(wbArg_getInputFile(args, 1), &inputLength);
float *hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
// allocate GPU memory
wbTime_start(GPU, "Allocating GPU memory");
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
cudaMalloc(&deviceInput1, inputLength * sizeof(float));
cudaMalloc(&deviceInput2, inputLength * sizeof(float));
cudaMalloc(&deviceOutput, inputLength * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory");
// copy memory to the GPU
wbTime_start(GPU, "Copying input memory to the GPU");
cudaMemcpy(deviceInput1, hostInput1, inputLength * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput2, inputLength * sizeof(float),
cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU");
// initialize grid and block dimensions
// hard to decide which execution configuration should be chosen
// as long as vector size is less than 1024, we can use a single block
int numBlocks = 1;
int threadsPerBlock = inputLength;
// launch GPU kernel
wbTime_start(Compute, "Performing CUDA computation");
vecAdd<<<numBlocks, threadsPerBlock>>>(deviceInput1, deviceInput2,
deviceOutput, inputLength);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
// copy GPU memory back to the CPU
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostOutput, deviceOutput, inputLength * sizeof(float),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
// free GPU memory
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
// check solution
wbSolution(args, hostOutput, inputLength);
// free host memory
free(hostInput1);
free(hostInput2);
free(hostOutput);
return EXIT_SUCCESS;
}
|
796928181f36a3b13e8f36a34813e755f363ac56.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vectFill.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *data1 = NULL;
hipMalloc(&data1, XSIZE*YSIZE);
int *data2 = NULL;
hipMalloc(&data2, XSIZE*YSIZE);
int *restult = NULL;
hipMalloc(&restult, XSIZE*YSIZE);
unsigned long sizeOfArray = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vectFill), dim3(gridBlock),dim3(threadBlock), 0, 0, data1,data2,restult,sizeOfArray);
hipDeviceSynchronize();
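 // Warm-up: a few untimed launches before the 1000-iteration timed loop, so device
 // initialization and clock ramp-up do not skew the measurement.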
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vectFill), dim3(gridBlock),dim3(threadBlock), 0, 0, data1,data2,restult,sizeOfArray);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vectFill), dim3(gridBlock),dim3(threadBlock), 0, 0, data1,data2,restult,sizeOfArray);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 796928181f36a3b13e8f36a34813e755f363ac56.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vectFill.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *data1 = NULL;
cudaMalloc(&data1, XSIZE*YSIZE);
int *data2 = NULL;
cudaMalloc(&data2, XSIZE*YSIZE);
int *restult = NULL;
cudaMalloc(&restult, XSIZE*YSIZE);
unsigned long sizeOfArray = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vectFill<<<gridBlock,threadBlock>>>(data1,data2,restult,sizeOfArray);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vectFill<<<gridBlock,threadBlock>>>(data1,data2,restult,sizeOfArray);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vectFill<<<gridBlock,threadBlock>>>(data1,data2,restult,sizeOfArray);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
762285486f44c1bb3e7bdb6b2a6a8ec92c1575f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "moderngpu.cuh"
using namespace mgpu;
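// Merge by ranking: each element of A finds its lower-bound rank in B and each element
// of B finds its upper-bound rank in A; writing every key to (its own index + its rank
// in the other array) produces the merged output with no inter-thread communication,
// and the lower/upper-bound asymmetry keeps equal keys from colliding.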
template<int NT, typename InputIt1, typename InputIt2, typename OutputIt,
typename Comp>
__global__ void ParallelMergeA(InputIt1 a_global, int aCount, InputIt2 b_global,
int bCount, OutputIt dest_global, Comp comp) {
typedef typename std::iterator_traits<InputIt1>::value_type T;
int gid = threadIdx.x + NT * blockIdx.x;
if(gid < aCount) {
T aKey = a_global[gid];
int lb = BinarySearch<MgpuBoundsLower>(b_global, bCount, aKey, comp);
dest_global[gid + lb] = aKey;
}
}
template<int NT, typename InputIt1, typename InputIt2, typename OutputIt,
typename Comp>
__global__ void ParallelMergeB(InputIt1 a_global, int aCount, InputIt2 b_global,
int bCount, OutputIt dest_global, Comp comp) {
typedef typename std::iterator_traits<InputIt2>::value_type T;
int gid = threadIdx.x + NT * blockIdx.x;
if(gid < bCount) {
T bKey = b_global[gid];
int ub = BinarySearch<MgpuBoundsUpper>(a_global, aCount, bKey, comp);
dest_global[gid + ub] = bKey;
}
}
int main(int argc, char** argv) {
ContextPtr context = CreateCudaDevice(argc, argv, true);
int ACount = 100;
int BCount = 100;
int Count = ACount + BCount;
MGPU_MEM(int) aData = context->SortRandom<int>(ACount, 0, Count - 1);
MGPU_MEM(int) bData = context->SortRandom<int>(BCount, 0, Count - 1);
MGPU_MEM(int) destData = context->Malloc<int>(Count);
const int NT = 256;
int aBlocks = MGPU_DIV_UP(ACount, NT);
int bBlocks = MGPU_DIV_UP(BCount, NT);
hipLaunchKernelGGL(( ParallelMergeA<NT>), dim3(aBlocks), dim3(NT), 0, 0, aData->get(), ACount, bData->get(),
BCount, destData->get(), mgpu::less<int>());
hipLaunchKernelGGL(( ParallelMergeB<NT>), dim3(bBlocks), dim3(NT), 0, 0, aData->get(), ACount, bData->get(),
BCount, destData->get(), mgpu::less<int>());
printf("A data:\n");
PrintArray(*aData, "%4d", 10);
printf("\nB data:\n");
PrintArray(*bData, "%4d", 10);
printf("\nMerged data:\n");
PrintArray(*destData, "%4d", 10);
return 0;
}
| 762285486f44c1bb3e7bdb6b2a6a8ec92c1575f4.cu | #include "moderngpu.cuh"
using namespace mgpu;
template<int NT, typename InputIt1, typename InputIt2, typename OutputIt,
typename Comp>
__global__ void ParallelMergeA(InputIt1 a_global, int aCount, InputIt2 b_global,
int bCount, OutputIt dest_global, Comp comp) {
typedef typename std::iterator_traits<InputIt1>::value_type T;
int gid = threadIdx.x + NT * blockIdx.x;
if(gid < aCount) {
T aKey = a_global[gid];
int lb = BinarySearch<MgpuBoundsLower>(b_global, bCount, aKey, comp);
dest_global[gid + lb] = aKey;
}
}
template<int NT, typename InputIt1, typename InputIt2, typename OutputIt,
typename Comp>
__global__ void ParallelMergeB(InputIt1 a_global, int aCount, InputIt2 b_global,
int bCount, OutputIt dest_global, Comp comp) {
typedef typename std::iterator_traits<InputIt2>::value_type T;
int gid = threadIdx.x + NT * blockIdx.x;
if(gid < bCount) {
T bKey = b_global[gid];
int ub = BinarySearch<MgpuBoundsUpper>(a_global, aCount, bKey, comp);
dest_global[gid + ub] = bKey;
}
}
int main(int argc, char** argv) {
ContextPtr context = CreateCudaDevice(argc, argv, true);
int ACount = 100;
int BCount = 100;
int Count = ACount + BCount;
MGPU_MEM(int) aData = context->SortRandom<int>(ACount, 0, Count - 1);
MGPU_MEM(int) bData = context->SortRandom<int>(BCount, 0, Count - 1);
MGPU_MEM(int) destData = context->Malloc<int>(Count);
const int NT = 256;
int aBlocks = MGPU_DIV_UP(ACount, NT);
int bBlocks = MGPU_DIV_UP(BCount, NT);
ParallelMergeA<NT><<<aBlocks, NT>>>(aData->get(), ACount, bData->get(),
BCount, destData->get(), mgpu::less<int>());
ParallelMergeB<NT><<<bBlocks, NT>>>(aData->get(), ACount, bData->get(),
BCount, destData->get(), mgpu::less<int>());
printf("A data:\n");
PrintArray(*aData, "%4d", 10);
printf("\nB data:\n");
PrintArray(*bData, "%4d", 10);
printf("\nMerged data:\n");
PrintArray(*destData, "%4d", 10);
return 0;
}
|
f27c79a92d0f6fa092fa746ba93230e5f9e86708.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// A block can be split into parallel threads
// use parallel threads instead of parallel blocks
__global__ void add(int *a, int *b, int *c) {
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
void random_ints(int* a, int N)
{
int i;
for (i = 0; i < N; ++i)
a[i] = rand();
}
#define N 512
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **) &d_a, size);
hipMalloc((void **) &d_b, size);
hipMalloc((void **) &d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU with Nthreads
hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, d_a, d_b, d_c);
// Copy results back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
// quick sanity check on a few values
printf("a[0]=%d b[0]=%d c[0]=%d\n", a[0], b[0], c[0]);
// Cleanup
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| f27c79a92d0f6fa092fa746ba93230e5f9e86708.cu | #include <stdio.h>
#include <stdlib.h>
// A block can be split into parallel threads
// use parallel threads instead of parallel blocks
__global__ void add(int *a, int *b, int *c) {
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
void random_ints(int* a, int N)
{
int i;
for (i = 0; i < N; ++i)
a[i] = rand();
}
#define N 512
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **) &d_a, size);
cudaMalloc((void **) &d_b, size);
cudaMalloc((void **) &d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU with Nthreads
add<<<1,N>>>(d_a, d_b, d_c);
// Copy results back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
// quick sanity check on a few values
printf("a[0]=%d b[0]=%d c[0]=%d\n", a[0], b[0], c[0]);
// Cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
273dd8feb31f8fc117f2b9906caba0b83905debe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=[32,4] --gridDim=[4,25]
#define BLOCK_X 32
#define BLOCK_Y 4
//
// Notes:
//
// 1) strategy: one thread per node in the 2D block;
// after initialisation it marches in the k-direction
// working with 3 planes of data at a time
//
// 2) each thread also loads in data for at most one halo node;
// assumes the number of halo nodes is not more than the
// number of interior nodes
//
// 3) corner halo nodes are included because they are needed
// for more general applications with cross-derivatives
//
// 4) could try double-buffering in the future fairly easily
//
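//
// Worked size check (editor's illustration for the BLOCK_X=32, BLOCK_Y=4 setup
// above): each block stages a (32+2) x (4+2) halo tile per plane, so JOFF = 34,
// KOFF = 34*6 = 204 floats, and with 3 resident planes the shared array holds
// 3*204 = 612 floats = 2448 bytes per block. The block also has 32*4 = 128
// threads while only 2*(32+4+2) = 76 halo loads are needed, which is why note 2
// can assume at most one halo node per thread.
//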
// definition to use efficient __mul24 intrinsic
#define INDEX(i,j,j_off) (i +__mul24(j,j_off))
// device code
__global__ void GPU_laplace3d(int NX, int NY, int NZ, int pitch,
float *d_u1, float *d_u2)
{
__requires(NX == 100);
__requires(NY == 100);
__requires(NZ == 100);
__requires(pitch == 128);
int indg, indg_h, indg0;
int i, j, k, ind, ind_h, halo, active;
float u2, sixth=1.0f/6.0f;
int NXM1 = NX-1;
int NYM1 = NY-1;
int NZM1 = NZ-1;
//
// define local array offsets
//
#define IOFF 1
#define JOFF (BLOCK_X+2)
#define KOFF (BLOCK_X+2)*(BLOCK_Y+2)
__shared__ float u1[3*KOFF];
//
// first set up indices for halos
//
k = threadIdx.x + threadIdx.y*BLOCK_X;
halo = k < 2*(BLOCK_X+BLOCK_Y+2);
if (halo) {
if (threadIdx.y<2) { // y-halos (coalesced)
i = threadIdx.x;
j = threadIdx.y*(BLOCK_Y+1) - 1;
}
else { // x-halos (not coalesced)
i = (k%2)*(BLOCK_X+1) - 1;
j = k/2 - BLOCK_X - 1;
}
ind_h = INDEX(i+1,j+1,JOFF) + KOFF;
i = INDEX(i,blockIdx.x,BLOCK_X); // global indices
j = INDEX(j,blockIdx.y,BLOCK_Y);
indg_h = INDEX(i,j,pitch);
halo = (i>=0) && (i<NX) && (j>=0) && (j<NY);
}
//
// then set up indices for main block
//
i = threadIdx.x;
j = threadIdx.y;
ind = INDEX(i+1,j+1,JOFF) + KOFF;
i = INDEX(i,blockIdx.x,BLOCK_X); // global indices
j = INDEX(j,blockIdx.y,BLOCK_Y);
indg = INDEX(i,j,pitch);
active = (i<NX) && (j<NY);
//
// read initial plane of u1 array
//
if (active) u1[ind+KOFF] = d_u1[indg];
if (halo) u1[ind_h+KOFF] = d_u1[indg_h];
//
// loop over k-planes
//
for (k=0;
__invariant(__implies(active, indg == i + j * pitch + k * NY * pitch)),
__global_invariant(__write_implies(d_u2, (__write_offset_bytes(d_u2)/sizeof(float) - (i + j * pitch))%(NY * pitch) == 0)),
__global_invariant(__write_implies(d_u2, (__write_offset_bytes(d_u2)/sizeof(float) - (i + j * pitch))/(NY * pitch) < NZ)),
__global_invariant(__write_implies(u1,
__implies(active & !halo, __write_offset_bytes(u1)/sizeof(float) == ind + KOFF) &
__implies(!active & halo, __write_offset_bytes(u1)/sizeof(float) == ind_h + KOFF) &
__implies(active & halo, (__write_offset_bytes(u1)/sizeof(float) == ind + KOFF) |
(__write_offset_bytes(u1)/sizeof(float) == ind_h + KOFF)))),
__global_invariant(__implies(!active & !halo, !__write(u1))),
k<NZ; k++) {
// move two planes down and read in new plane k+1
if (active) {
indg0 = indg;
indg = INDEX(indg,NY,pitch);
u1[ind-KOFF] = u1[ind];
u1[ind] = u1[ind+KOFF];
if (k<NZM1)
u1[ind+KOFF] = d_u1[indg];
}
if (halo) {
indg_h = INDEX(indg_h,NY,pitch);
u1[ind_h-KOFF] = u1[ind_h];
u1[ind_h] = u1[ind_h+KOFF];
if (k<NZM1)
u1[ind_h+KOFF] = d_u1[indg_h];
}
__syncthreads();
//
// perform Jacobi iteration to set values in u2
//
if (active) {
if (i==0 || i==NXM1 || j==0 || j==NYM1 || k==0 || k==NZM1) {
u2 = u1[ind]; // Dirichlet b.c.'s
}
else {
u2 = ( u1[ind-IOFF] + u1[ind+IOFF]
+ u1[ind-JOFF] + u1[ind+JOFF]
+ u1[ind-KOFF] + u1[ind+KOFF] ) * sixth;
}
d_u2[indg0] = u2;
}
__syncthreads();
}
}
| 273dd8feb31f8fc117f2b9906caba0b83905debe.cu | //pass
//--blockDim=[32,4] --gridDim=[4,25]
#define BLOCK_X 32
#define BLOCK_Y 4
//
// Notes:
//
// 1) strategy: one thread per node in the 2D block;
// after initialisation it marches in the k-direction
// working with 3 planes of data at a time
//
// 2) each thread also loads in data for at most one halo node;
// assumes the number of halo nodes is not more than the
// number of interior nodes
//
// 3) corner halo nodes are included because they are needed
// for more general applications with cross-derivatives
//
// 4) could try double-buffering in the future fairly easily
//
// definition to use efficient __mul24 intrinsic
#define INDEX(i,j,j_off) (i +__mul24(j,j_off))
// device code
__global__ void GPU_laplace3d(int NX, int NY, int NZ, int pitch,
float *d_u1, float *d_u2)
{
__requires(NX == 100);
__requires(NY == 100);
__requires(NZ == 100);
__requires(pitch == 128);
int indg, indg_h, indg0;
int i, j, k, ind, ind_h, halo, active;
float u2, sixth=1.0f/6.0f;
int NXM1 = NX-1;
int NYM1 = NY-1;
int NZM1 = NZ-1;
//
// define local array offsets
//
#define IOFF 1
#define JOFF (BLOCK_X+2)
#define KOFF (BLOCK_X+2)*(BLOCK_Y+2)
__shared__ float u1[3*KOFF];
//
// first set up indices for halos
//
k = threadIdx.x + threadIdx.y*BLOCK_X;
halo = k < 2*(BLOCK_X+BLOCK_Y+2);
if (halo) {
if (threadIdx.y<2) { // y-halos (coalesced)
i = threadIdx.x;
j = threadIdx.y*(BLOCK_Y+1) - 1;
}
else { // x-halos (not coalesced)
i = (k%2)*(BLOCK_X+1) - 1;
j = k/2 - BLOCK_X - 1;
}
ind_h = INDEX(i+1,j+1,JOFF) + KOFF;
i = INDEX(i,blockIdx.x,BLOCK_X); // global indices
j = INDEX(j,blockIdx.y,BLOCK_Y);
indg_h = INDEX(i,j,pitch);
halo = (i>=0) && (i<NX) && (j>=0) && (j<NY);
}
//
// then set up indices for main block
//
i = threadIdx.x;
j = threadIdx.y;
ind = INDEX(i+1,j+1,JOFF) + KOFF;
i = INDEX(i,blockIdx.x,BLOCK_X); // global indices
j = INDEX(j,blockIdx.y,BLOCK_Y);
indg = INDEX(i,j,pitch);
active = (i<NX) && (j<NY);
//
// read initial plane of u1 array
//
if (active) u1[ind+KOFF] = d_u1[indg];
if (halo) u1[ind_h+KOFF] = d_u1[indg_h];
//
// loop over k-planes
//
for (k=0;
__invariant(__implies(active, indg == i + j * pitch + k * NY * pitch)),
__global_invariant(__write_implies(d_u2, (__write_offset_bytes(d_u2)/sizeof(float) - (i + j * pitch))%(NY * pitch) == 0)),
__global_invariant(__write_implies(d_u2, (__write_offset_bytes(d_u2)/sizeof(float) - (i + j * pitch))/(NY * pitch) < NZ)),
__global_invariant(__write_implies(u1,
__implies(active & !halo, __write_offset_bytes(u1)/sizeof(float) == ind + KOFF) &
__implies(!active & halo, __write_offset_bytes(u1)/sizeof(float) == ind_h + KOFF) &
__implies(active & halo, (__write_offset_bytes(u1)/sizeof(float) == ind + KOFF) |
(__write_offset_bytes(u1)/sizeof(float) == ind_h + KOFF)))),
__global_invariant(__implies(!active & !halo, !__write(u1))),
k<NZ; k++) {
// move two planes down and read in new plane k+1
if (active) {
indg0 = indg;
indg = INDEX(indg,NY,pitch);
u1[ind-KOFF] = u1[ind];
u1[ind] = u1[ind+KOFF];
if (k<NZM1)
u1[ind+KOFF] = d_u1[indg];
}
if (halo) {
indg_h = INDEX(indg_h,NY,pitch);
u1[ind_h-KOFF] = u1[ind_h];
u1[ind_h] = u1[ind_h+KOFF];
if (k<NZM1)
u1[ind_h+KOFF] = d_u1[indg_h];
}
__syncthreads();
//
// perform Jacobi iteration to set values in u2
//
if (active) {
if (i==0 || i==NXM1 || j==0 || j==NYM1 || k==0 || k==NZM1) {
u2 = u1[ind]; // Dirichlet b.c.'s
}
else {
u2 = ( u1[ind-IOFF] + u1[ind+IOFF]
+ u1[ind-JOFF] + u1[ind+JOFF]
+ u1[ind-KOFF] + u1[ind+KOFF] ) * sixth;
}
d_u2[indg0] = u2;
}
__syncthreads();
}
}
|
6cef234ea36a9ac3e0967cc313f4a98856d7da2e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <SDL2/SDL.h>
#include <string>
#include <stdexcept>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "cuda_common.h"
#include <hip/hip_runtime_api.h>
#include "complex.h"
#include "cuda_common.h"
#include "constants.h"
//https://nvlabs.github.io/cub/
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <typeinfo>
#include "textures.h"
#include "functions.h"
#define CURAND_CALL(x) x
//do { if((x) != HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
template<class Real>
__global__ void initializeRI(complex *in, Real *d_sum){
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
Real val = 0.0;
if(i < nx && j < ny){
if(i>0 && j>0 && i<nx-1 && j<ny-1) val = in[i+j*nx].abs2();
else in[i+j*nx] = complex::zero();
}
Real *smem = SharedMemory<Real>();
smem[threadIdx.x] = val;
__syncthreads();
//Only one active warp do the reduction
//Bocks are always multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem[threadIdx.x] += smem[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread do the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem[s];
CudaAtomicAdd(d_sum, sum);
}
}
template<class Real>
__global__ void initialize(complex *in, Real *d_sum){
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
Real val = 0.0;
if(i>0 && j>0 && i<nx-1 && j<ny-1){
complex value = complex::one();
in[i+j*nx]= value;
val = value.abs2();
}
Real *smem = SharedMemory<Real>();
smem[threadIdx.x] = val;
__syncthreads();
//Only one active warp do the reduction
//Bocks are always multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem[threadIdx.x] += smem[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread do the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem[s];
CudaAtomicAdd(d_sum, sum);
}
}
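// Editor's note (hedged, not in the original): the reduction tail shared by both
// kernels above relies on implicit warp-synchronous execution when thread 0 reads
// smem[1..WARP_SIZE-1] without a barrier, which is only safe on pre-Volta parts.
// Under independent thread scheduling the same tail would need an explicit
// __syncwarp() before the serial sum, e.g.:
//
// if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
// for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
// smem[threadIdx.x] += smem[threadIdx.x + WARP_SIZE * s];
// }
// __syncwarp();
// if(threadIdx.x == 0) { /* serial sum over smem[0..WARP_SIZE-1] as above */ }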
template<class Real>
void cGenerateUniform(hiprandGenerator_t gen, Real* a0, uint size){};
template<>
void cGenerateUniform<float>(hiprandGenerator_t gen, float* a0, uint size){
CURAND_CALL(hiprandGenerateUniform(gen, a0, size));
}
template<>
void cGenerateUniform<double>(hiprandGenerator_t gen, double* a0, uint size){
CURAND_CALL(hiprandGenerateUniformDouble(gen, a0, size));
}
template<class Real>
void Init(complex *a0_d, Real* d_sum, int nx, int ny, bool randomInit){
CUDA_SAFE_CALL(hipMemset(d_sum, 0, sizeof(Real)));
dim3 threadsperblock(128);
dim3 nblocks((nx*ny+threadsperblock.x-1)/threadsperblock.x, 1, 1);
size_t shared_bytes = threadsperblock.x * sizeof(Real);
if(randomInit){
hiprandGenerator_t gen;
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL));
cGenerateUniform(gen, (Real*)a0_d, nx*ny*2);
CURAND_CALL(hiprandDestroyGenerator(gen));
hipLaunchKernelGGL(( initializeRI<Real>), dim3(nblocks),dim3(threadsperblock), shared_bytes, 0, a0_d, d_sum);
}
else hipLaunchKernelGGL(( initialize<Real>), dim3(nblocks),dim3(threadsperblock), shared_bytes, 0, a0_d, d_sum);
CUT_CHECK_ERROR("kernel launch failure");
CUDA_SAFE_THREAD_SYNC();
}
template
void Init<float>(complexs *a0_d, float* d_sum, int nx, int ny, bool randomInit);
template
void Init<double>(complexd *a0_d, double* d_sum, int nx, int ny, bool randomInit);
| 6cef234ea36a9ac3e0967cc313f4a98856d7da2e.cu | #include <iostream>
#include <SDL2/SDL.h>
#include <string>
#include <stdexcept>
#include <cstdio>
#include <cuda.h>
#include <curand.h>
#include "cuda_common.h"
#include <cuda_runtime_api.h>
#include "complex.h"
#include "cuda_common.h"
#include "constants.h"
//https://nvlabs.github.io/cub/
#include <cub/cub.cuh>
#include <cub/block/block_reduce.cuh>
#include <typeinfo>
#include "textures.h"
#include "functions.h"
#define CURAND_CALL(x) x
//do { if((x) != CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
template<class Real>
__global__ void initializeRI(complex *in, Real *d_sum){
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
Real val = 0.0;
if(i < nx && j < ny){
if(i>0 && j>0 && i<nx-1 && j<ny-1) val = in[i+j*nx].abs2();
else in[i+j*nx] = complex::zero();
}
Real *smem = SharedMemory<Real>();
smem[threadIdx.x] = val;
__syncthreads();
//Only one active warp do the reduction
//Bocks are always multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem[threadIdx.x] += smem[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread do the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem[s];
CudaAtomicAdd(d_sum, sum);
}
}
template<class Real>
__global__ void initialize(complex *in, Real *d_sum){
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
Real val = 0.0;
if(i>0 && j>0 && i<nx-1 && j<ny-1){
complex value = complex::one();
in[i+j*nx]= value;
val = value.abs2();
}
Real *smem = SharedMemory<Real>();
smem[threadIdx.x] = val;
__syncthreads();
//Only one active warp do the reduction
//Bocks are always multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem[threadIdx.x] += smem[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread do the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem[s];
CudaAtomicAdd(d_sum, sum);
}
}
template<class Real>
void cGenerateUniform(curandGenerator_t gen, Real* a0, uint size){};
template<>
void cGenerateUniform<float>(curandGenerator_t gen, float* a0, uint size){
CURAND_CALL(curandGenerateUniform(gen, a0, size));
}
template<>
void cGenerateUniform<double>(curandGenerator_t gen, double* a0, uint size){
CURAND_CALL(curandGenerateUniformDouble(gen, a0, size));
}
template<class Real>
void Init(complex *a0_d, Real* d_sum, int nx, int ny, bool randomInit){
CUDA_SAFE_CALL(cudaMemset(d_sum, 0, sizeof(Real)));
dim3 threadsperblock(128);
dim3 nblocks((nx*ny+threadsperblock.x-1)/threadsperblock.x, 1, 1);
size_t shared_bytes = threadsperblock.x * sizeof(Real);
if(randomInit){
curandGenerator_t gen;
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL));
cGenerateUniform(gen, (Real*)a0_d, nx*ny*2);
CURAND_CALL(curandDestroyGenerator(gen));
initializeRI<Real><<<nblocks,threadsperblock, shared_bytes, 0>>>(a0_d, d_sum);
}
else initialize<Real><<<nblocks,threadsperblock, shared_bytes, 0>>>(a0_d, d_sum);
CUT_CHECK_ERROR("kernel launch failure");
CUDA_SAFE_THREAD_SYNC();
}
template
void Init<float>(complexs *a0_d, float* d_sum, int nx, int ny, bool randomInit);
template
void Init<double>(complexd *a0_d, double* d_sum, int nx, int ny, bool randomInit);
|
6809eb2eaef60e4bf03146e22163ea1bb41645f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "utilidades.h"
#include <jacobi.h>
#include <iostream>
#include "matrices_test.h"
#include <readers.h>
#include <SRJ_CUDA.h>
#include <SRJ.h>
#include <SRJ_OMP.h>
#include "tests.h"
#include "jacobiOmp.h"
#include "jacobiCuda.h"
using namespace std;
const double tolerance = 1.e-8;
void printResult(int k, const double *_x, const double *_y, const double *_b, int n) {
if (n > 1000) n = 100;
cout << "Vector x: ";
for (auto i = 0; i < n; i++) cout << _x[i] << " ";
cout << endl;
if (_y != nullptr) {
cout << "Vector y: ";
for (auto i = 0; i < n; i++) cout << _y[i] << " ";
cout << endl;
}
cout << "Vector b: ";
for (auto i = 0; i < n; i++) cout << _b[i] << " ";
cout << endl;
cout << "Iteraciones K: " << k << endl << endl;
}
double test_jacobi_CUDA(CSR matriz, double *_b, string _x0) {
int BLOCKSIZE = 256;
cout << "Test Jacobi CUDA *****///\n"
<< "BLOCKSIZE: " << BLOCKSIZE << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
jacobi_CUDA test(matriz, vector<double>(_x_k, _x_k + matriz.getFilas()), BLOCKSIZE);
int iter = 0;
double residual = 1e36;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
// if (iter % 1000 == 0) cout << "residual: " << residual << endl;
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
//}
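// Editor's sketch (not part of the original driver): each loop above runs one
// classic Jacobi step. Splitting A = D + R (D = diagonal, R = off-diagonal),
//
// x^{k+1} = D^{-1} (b - R x^k), i.e. x_i^{k+1} = (b_i - sum_{j!=i} a_ij * x_j^k) / a_ii,
//
// which is what the multiplicacionMV / calculaResiduo / obtenerNuevaX /
// actualizaX sequence appears to compute, with norma() supplying the stopping
// test against `tolerance`.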
double test_jacobi_OMP(CSR matriz, double *_b, string _x0) {
cout << "Test Jacobi OMP *****///\n";
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
jacobi_OMP test(matriz, vector<double>(_x_k, _x_k + matriz.getFilas()));
int iter = 0;
double residual = 1e36;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double jacobi_secuencial(CSR matriz, double *_b, string _x0) {
cout << "Jacobi secuencial*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
jacobi test(matriz, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double srj_CUDA(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ CUDA*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
auto srjSch = srjSch_reader<double>(_srjSch);
SRJ_CUDA test(matriz, srjSch, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double srj_OMP(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ OMP*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
auto srjSch = srjSch_reader<double>(_srjSch);
SRJ_OMP test(matriz, srjSch, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double srj_secuencial(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ SECUENCIAL*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
auto srjSch = srjSch_reader<double>(_srjSch);
SRJ test(matriz, srjSch, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double SOR_OMP(CSR matriz, double *_b, string _x0, double omega) {
cout << "SOR OMP *****///" << endl;
cout << "omega: " << omega << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double residual = 1.e36;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
vector<double> x_km1(_x_k, _x_k + matriz.getFilas());
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
if (line != matriz.getColInd()[element])
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_x_k[line] = (1 - omega) * _x_k[line] +
(omega * (_b[line] - sum) / matriz.getDiagonal()[line]);
}
for (int line = 0; line < matriz.getFilas(); ++line) {
_residual_vec[line] = _x_k[line] - x_km1[line];
}
residual = utilidades::reduce_max_OMP(_residual_vec, matriz.getFilas());
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
double SOR_CUDA(CSR matriz, double *_b, string _x0, double omega) {
cout << "SOR CUDA *****///" << endl;
cout << "omega: " << omega << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double *_residual_vec_d;
hipMalloc(&_residual_vec_d, matriz.getFilas() * sizeof(double));
double residual = 1.e36;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
vector<double> x_km1(_x_k, _x_k + matriz.getFilas());
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
if (line != matriz.getColInd()[element])
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_x_k[line] = (1 - omega) * _x_k[line] +
(omega * (_b[line] - sum) / matriz.getDiagonal()[line]);
}
for (int line = 0; line < matriz.getFilas(); ++line) {
_residual_vec[line] = _x_k[line] - x_km1[line];
}
hipMemcpy(_residual_vec_d, _residual_vec, matriz.getFilas() * sizeof(double), hipMemcpyHostToDevice);
residual = utilidades::reduce_max_CUDA(_residual_vec_d, matriz.getFilas(), 256);
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
double SOR(CSR matriz, double *_b, string _x0, double omega) {
cout << "SOR secuencial *****///" << endl;
cout << "omega: " << omega << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double residual = 1.e36;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
vector<double> x_km1(_x_k, _x_k + matriz.getFilas());
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
if (line != matriz.getColInd()[element])
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_x_k[line] = (1 - omega) * _x_k[line] +
(omega * (_b[line] - sum) / matriz.getDiagonal()[line]);
}
for (int line = 0; line < matriz.getFilas(); ++line) {
_residual_vec[line] = _x_k[line] - x_km1[line];
}
residual = utilidades::reduce_max_OMP(_residual_vec, matriz.getFilas());
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
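// Editor's note (illustrative): the three SOR variants above all apply the
// successive over-relaxation update in place,
//
// x_i <- (1 - omega) * x_i + omega * (b_i - sum_{j!=i} a_ij * x_j) / a_ii,
//
// sweeping rows in order so already-updated entries are reused within the same
// sweep (plain Gauss-Seidel when omega = 1). The quantity tested against
// `tolerance` here is the largest change in x between sweeps (via reduce_max),
// not ||Ax - b||.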
void SOR_clasico() {
cout << "SOR *****///" << endl;
const int filas = 4;
auto matriz = m.m2;
double *_b = b._2;
double *_x_k = new double[filas]; // input vector for the iteration
double *_residual_vec = new double[filas]; // residual vector
const double omega = 0.55;
double residual = 1.e36;
const double tolerance = 1.e-8;
for (int i = 0; i < filas; i++) _x_k[i] = 0;
int iter;
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int i = 0; i < filas; ++i) {
double sigma = 0;
for (int j = 0; j < filas; j++) {
if (i != j)
sigma += matriz[i][j] * _x_k[j];
}
_x_k[i] = (1 - omega) * _x_k[i] + (omega * 1.0 / matriz[i][i]) * (_b[i] - sigma);
}
// Compute the residual
for (int i = 0; i < filas; ++i) {
double sum = 0;
for (unsigned int j = 0; j < filas; j++) {
sum += matriz[i][j] * _x_k[j];
}
_residual_vec[i] = sum - _b[i];
}
double sum_cuadrados = 0;
for (size_t i = 0; i < filas; i++) {
sum_cuadrados += pow(_residual_vec[i], 2);
}
residual = sqrt(sum_cuadrados);
}
printResult(iter, _x_k, nullptr, _b, filas);
}
void jacobi_clasico() {
cout << "Jacobi clasico *****///" << endl;
const int filas = 8;
auto matriz = m.m4;
double *_b = b._4;
double *_x_k = new double[filas]; // input vector for the iteration
double *_x_kp1 = new double[filas]; // next iterate (output) vector
double *_residual_vec = new double[filas]; // residual vector
double residual = 1.e36;
const double tolerance = 1.e-8;
for (int i = 0; i < filas; i++) _x_k[i] = 1;
int iter;
for (iter = 0; residual > tolerance; iter++) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int i = 0; i < filas; ++i) {
double sigma = 0;
for (int j = 0; j < filas; j++) {
if (i != j)
sigma += matriz[i][j] * _x_k[j];
}
_x_kp1[i] = (_b[i] - sigma) / matriz[i][i];
}
// Compute the residual
for (int i = 0; i < filas; ++i) {
double sum = 0;
for (unsigned int j = 0; j < filas; j++) {
sum += matriz[i][j] * _x_kp1[j];
}
_residual_vec[i] = sum - _b[i];
}
double sum_cuadrados = 0;
for (size_t i = 0; i < filas; i++) {
sum_cuadrados += pow(_residual_vec[i], 2);
}
residual = sqrt(sum_cuadrados);
if (residual < tolerance) break;
double *tmp = _x_k;
_x_k = _x_kp1;
_x_kp1 = tmp;
}
printResult(iter, _x_k, nullptr, _b, filas);
}
void jacobi_secuencial_funcion(CSR matriz, double *_b, string _x0) {
cout << "Jacobi secuencial*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
// if (matriz.isPrecondicionada())
// for (int i = 0; i < matriz.getFilas(); i++) _b[i] = _b[i] / matriz.getDiagonal()[i];
double *_x_kp1 = new double[matriz.getFilas()]; // output vector of the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double residual = 1.e36;
const double tolerance = 1.e-8;
// initial seed
// double *_b = new double[matriz.getFilas()];
// for (int i = 0; i < matriz.getFilas(); i++) _b[i] = 1;
// double *_x_k = new double[matriz.getFilas()];
// for (int i = 0; i < matriz.getFilas(); i++) _x_k[i] = 0;
int iter;
for (iter = 0; residual > tolerance; ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_residual_vec[line] = (_b[line] - sum) / matriz.getDiagonal()[line];
// vector for residual
_x_kp1[line] = _x_k[line] + _residual_vec[line];
}
// compute residual
residual = 0;
double max_x_k = 0;
for (int line = 0; line < matriz.getFilas(); ++line) {
residual = (fabs(_residual_vec[line]) > residual ? fabs(_residual_vec[line]) : residual);
max_x_k = (fabs(_x_kp1[line]) > max_x_k ? fabs(_x_kp1[line]) : max_x_k);
}
residual = residual / max_x_k;
// transfer _x_kp1 into _x_k
double *tmp;
tmp = _x_k;
_x_k = _x_kp1;
_x_kp1 = tmp;
}
printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
}
double srj_secuencial_funcion(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ secuencial*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_x_kp1 = new double[matriz.getFilas()]; // output vector of the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
auto srjSch = srjSch_reader<double>(_srjSch);
double residual = 1.e36;
const double tolerance = 1.e-8;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; residual > tolerance; ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_residual_vec[line] = (_b[line] - sum) / matriz.getDiagonal()[line];
// vector for residual
_x_kp1[line] = _x_k[line] + _residual_vec[line] * srjSch[iter % srjSch.size()];
}
// compute residual
residual = 0;
double max_x_k = 0;
for (int line = 0; line < matriz.getFilas(); ++line) {
residual = (fabs(_residual_vec[line]) > residual ? fabs(_residual_vec[line])
: residual);
max_x_k = (fabs(_x_kp1[line]) > max_x_k ? fabs(_x_kp1[line]) : max_x_k);
}
residual = residual / max_x_k;
// transfer _x_kp1 into _x_k
double *tmp;
tmp = _x_k;
_x_k = _x_kp1;
_x_kp1 = tmp;
}
double t2 = utilidades::cpuSecond() - t1;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
| 6809eb2eaef60e4bf03146e22163ea1bb41645f6.cu | #include "utilidades.h"
#include <jacobi.h>
#include <iostream>
#include "matrices_test.h"
#include <readers.h>
#include <SRJ_CUDA.h>
#include <SRJ.h>
#include <SRJ_OMP.h>
#include "tests.h"
#include "jacobiOmp.h"
#include "jacobiCuda.h"
using namespace std;
const double tolerance = 1.e-8;
void printResult(int k, const double *_x, const double *_y, const double *_b, int n) {
if (n > 1000) n = 100;
cout << "Vector x: ";
for (auto i = 0; i < n; i++) cout << _x[i] << " ";
cout << endl;
if (_y != nullptr) {
cout << "Vector y: ";
for (auto i = 0; i < n; i++) cout << _y[i] << " ";
cout << endl;
}
cout << "Vector b: ";
for (auto i = 0; i < n; i++) cout << _b[i] << " ";
cout << endl;
cout << "Iteraciones K: " << k << endl << endl;
}
double test_jacobi_CUDA(CSR matriz, double *_b, string _x0) {
int BLOCKSIZE = 256;
cout << "Test Jacobi CUDA *****///\n"
<< "BLOCKSIZE: " << BLOCKSIZE << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
jacobi_CUDA test(matriz, vector<double>(_x_k, _x_k + matriz.getFilas()), BLOCKSIZE);
int iter = 0;
double residual = 1e36;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
// if (iter % 1000 == 0) cout << "residual: " << residual << endl;
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
//}
double test_jacobi_OMP(CSR matriz, double *_b, string _x0) {
cout << "Test Jacobi OMP *****///\n";
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
jacobi_OMP test(matriz, vector<double>(_x_k, _x_k + matriz.getFilas()));
int iter = 0;
double residual = 1e36;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double jacobi_secuencial(CSR matriz, double *_b, string _x0) {
cout << "Jacobi secuencial*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
jacobi test(matriz, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double srj_CUDA(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ CUDA*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
auto srjSch = srjSch_reader<double>(_srjSch);
SRJ_CUDA test(matriz, srjSch, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double srj_OMP(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ OMP*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
auto srjSch = srjSch_reader<double>(_srjSch);
SRJ_OMP test(matriz, srjSch, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double srj_secuencial(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ SECUENCIAL*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
auto srjSch = srjSch_reader<double>(_srjSch);
SRJ test(matriz, srjSch, vector<double>(_x_k, _x_k + matriz.getFilas()));
double residual = 1.e36;
int iter = 0;
double t1 = utilidades::cpuSecond();
while (residual > tolerance) {
test.multiplicacionMV();
test.calculaResiduo(_b);
test.obtenerNuevaX();
residual = test.norma();
test.actualizaX();
iter++;
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, test.getX().data(), test.getY(), _b, matriz.getFilas());
return t2;
}
double SOR_OMP(CSR matriz, double *_b, string _x0, double omega) {
cout << "SOR OMP *****///" << endl;
cout << "omega: " << omega << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double residual = 1.e36;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
vector<double> x_km1(_x_k, _x_k + matriz.getFilas());
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
if (line != matriz.getColInd()[element])
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_x_k[line] = (1 - omega) * _x_k[line] +
(omega * (_b[line] - sum) / matriz.getDiagonal()[line]);
}
for (int line = 0; line < matriz.getFilas(); ++line) {
_residual_vec[line] = _x_k[line] - x_km1[line];
}
residual = utilidades::reduce_max_OMP(_residual_vec, matriz.getFilas());
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
double SOR_CUDA(CSR matriz, double *_b, string _x0, double omega) {
cout << "SOR CUDA *****///" << endl;
cout << "omega: " << omega << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double *_residual_vec_d;
cudaMalloc(&_residual_vec_d, matriz.getFilas() * sizeof(double));
double residual = 1.e36;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
vector<double> x_km1(_x_k, _x_k + matriz.getFilas());
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
if (line != matriz.getColInd()[element])
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_x_k[line] = (1 - omega) * _x_k[line] +
(omega * (_b[line] - sum) / matriz.getDiagonal()[line]);
}
for (int line = 0; line < matriz.getFilas(); ++line) {
_residual_vec[line] = _x_k[line] - x_km1[line];
}
cudaMemcpy(_residual_vec_d, _residual_vec, matriz.getFilas() * sizeof(double), cudaMemcpyHostToDevice);
residual = utilidades::reduce_max_CUDA(_residual_vec_d, matriz.getFilas(), 256);
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
double SOR(CSR matriz, double *_b, string _x0, double omega) {
cout << "SOR secuencial *****///" << endl;
cout << "omega: " << omega << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double residual = 1.e36;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
vector<double> x_km1(_x_k, _x_k + matriz.getFilas());
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
if (line != matriz.getColInd()[element])
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_x_k[line] = (1 - omega) * _x_k[line] +
(omega * (_b[line] - sum) / matriz.getDiagonal()[line]);
}
for (int line = 0; line < matriz.getFilas(); ++line) {
_residual_vec[line] = _x_k[line] - x_km1[line];
}
residual = utilidades::reduce_max_OMP(_residual_vec, matriz.getFilas());
}
double t2 = utilidades::cpuSecond() - t1;
cout << "iter: " << iter << endl;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
void SOR_clasico() {
cout << "SOR *****///" << endl;
const int filas = 4;
auto matriz = m.m2;
double *_b = b._2;
double *_x_k = new double[filas]; // input vector for the iteration
double *_residual_vec = new double[filas]; // residual vector
const double omega = 0.55;
double residual = 1.e36;
const double tolerance = 1.e-8;
for (int i = 0; i < filas; i++) _x_k[i] = 0;
int iter;
for (iter = 0; (residual > tolerance); ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int i = 0; i < filas; ++i) {
double sigma = 0;
for (int j = 0; j < filas; j++) {
if (i != j)
sigma += matriz[i][j] * _x_k[j];
}
_x_k[i] = (1 - omega) * _x_k[i] + (omega * 1.0 / matriz[i][i]) * (_b[i] - sigma);
}
// Compute the residual
for (int i = 0; i < filas; ++i) {
double sum = 0;
for (unsigned int j = 0; j < filas; j++) {
sum += matriz[i][j] * _x_k[j];
}
_residual_vec[i] = sum - _b[i];
}
double sum_cuadrados = 0;
for (size_t i = 0; i < filas; i++) {
sum_cuadrados += pow(_residual_vec[i], 2);
}
residual = sqrt(sum_cuadrados);
}
printResult(iter, _x_k, nullptr, _b, filas);
}
void jacobi_clasico() {
cout << "Jacobi clasico *****///" << endl;
const int filas = 8;
auto matriz = m.m4;
double *_b = b._4;
double *_x_k = new double[filas]; // input vector for the iteration
double *_x_kp1 = new double[filas]; // next iterate (output) vector
double *_residual_vec = new double[filas]; // residual vector
double residual = 1.e36;
const double tolerance = 1.e-8;
for (int i = 0; i < filas; i++) _x_k[i] = 1;
int iter;
for (iter = 0; residual > tolerance; iter++) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int i = 0; i < filas; ++i) {
double sigma = 0;
for (int j = 0; j < filas; j++) {
if (i != j)
sigma += matriz[i][j] * _x_k[j];
}
_x_kp1[i] = (_b[i] - sigma) / matriz[i][i];
}
// Compute the residual
for (int i = 0; i < filas; ++i) {
double sum = 0;
for (unsigned int j = 0; j < filas; j++) {
sum += matriz[i][j] * _x_kp1[j];
}
_residual_vec[i] = sum - _b[i];
}
double sum_cuadrados = 0;
for (size_t i = 0; i < filas; i++) {
sum_cuadrados += pow(_residual_vec[i], 2);
}
residual = sqrt(sum_cuadrados);
if (residual < tolerance) break;
double *tmp = _x_k;
_x_k = _x_kp1;
_x_kp1 = tmp;
}
printResult(iter, _x_k, nullptr, _b, filas);
}
void jacobi_secuencial_funcion(CSR matriz, double *_b, string _x0) {
cout << "Jacobi secuencial*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas());
// if (matriz.isPrecondicionada())
// for (int i = 0; i < matriz.getFilas(); i++) _b[i] = _b[i] / matriz.getDiagonal()[i];
double *_x_kp1 = new double[matriz.getFilas()]; // output vector of the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
double residual = 1.e36;
const double tolerance = 1.e-8;
// initial seed
// double *_b = new double[matriz.getFilas()];
// for (int i = 0; i < matriz.getFilas(); i++) _b[i] = 1;
// double *_x_k = new double[matriz.getFilas()];
// for (int i = 0; i < matriz.getFilas(); i++) _x_k[i] = 0;
int iter;
for (iter = 0; residual > tolerance; ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_residual_vec[line] = (_b[line] - sum) / matriz.getDiagonal()[line];
// vector for residual
_x_kp1[line] = _x_k[line] + _residual_vec[line];
}
// compute residual
residual = 0;
double max_x_k = 0;
for (int line = 0; line < matriz.getFilas(); ++line) {
residual = (fabs(_residual_vec[line]) > residual ? fabs(_residual_vec[line]) : residual);
max_x_k = (fabs(_x_kp1[line]) > max_x_k ? fabs(_x_kp1[line]) : max_x_k);
}
residual = residual / max_x_k;
// transfer _x_kp1 into _x_k
double *tmp;
tmp = _x_k;
_x_k = _x_kp1;
_x_kp1 = tmp;
}
printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
}
double srj_secuencial_funcion(CSR matriz, double *_b, string _x0, string _srjSch) {
cout << "SRJ secuencial*****//" << endl;
double *_x_k = rhsVector_reader<double>(_x0, matriz.getFilas()); // input vector for the iteration
double *_x_kp1 = new double[matriz.getFilas()]; // output vector of the iteration
double *_residual_vec = new double[matriz.getFilas()]; // residual vector
auto srjSch = srjSch_reader<double>(_srjSch);
double residual = 1.e36;
const double tolerance = 1.e-8;
int iter;
double t1 = utilidades::cpuSecond();
for (iter = 0; residual > tolerance; ++iter) {
// Matrix-Vector Product Matrix_2d *x_k and compute residual vector
for (int line = 0; line < matriz.getFilas(); ++line) {
double sum = 0;
const unsigned int row_start = matriz.getRowPtr()[line];
const unsigned int row_end = matriz.getRowPtr()[line + 1];
for (unsigned int element = row_start; element < row_end; element++) {
sum += matriz.getVal()[element] * _x_k[matriz.getColInd()[element]];
}
_residual_vec[line] = (_b[line] - sum) / matriz.getDiagonal()[line];
// vector for residual
_x_kp1[line] = _x_k[line] + _residual_vec[line] * srjSch[iter % srjSch.size()];
}
// compute residual
residual = 0;
double max_x_k = 0;
for (int line = 0; line < matriz.getFilas(); ++line) {
residual = (fabs(_residual_vec[line]) > residual ? fabs(_residual_vec[line])
: residual);
max_x_k = (fabs(_x_kp1[line]) > max_x_k ? fabs(_x_kp1[line]) : max_x_k);
}
residual = residual / max_x_k;
// transfer _x_kp1 into _x_k
double *tmp;
tmp = _x_k;
_x_k = _x_kp1;
_x_kp1 = tmp;
}
double t2 = utilidades::cpuSecond() - t1;
// printResult(iter, _x_k, nullptr, _b, matriz.getFilas());
return t2;
}
|
ee52c583a4d4b3f73cc328676275fa2ee7446112.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <cstring>
#include <iostream>
#include <fstream>
#define ASIZE 256
#define PRIME 1000009
__global__ void processPattern(char* x ,int m, int shifts[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= m ) return;
char c = x[idx];
for( int i = m - 1; i >= idx; --i ) {
if ( x[i] == c ) {
shifts[c] = m - i;
return;
}
}
}
__global__ void compare(int idx,char *x, char *y, int m, int* results) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
// printf("%d\t%d\n",idx,id);
if(x[id]!=y[idx+id]) {
results[idx]=0;
return;
} else {
return;
}
}
__global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx > (n - m) ) { results[idx] = 0; return; }
if ( indx[idx] != idx ) { results[idx] = 0; return; }
if(x[0]==y[idx] && x[m-1]==y[idx+m-1]) {
/*
if(idx>1000 && idx<1100) {
hipLaunchKernelGGL(( compare), dim3(1),dim3(m), 0, 0, idx);
}
*/
if(m>2)
hipLaunchKernelGGL(( compare), dim3(1),dim3(m), 0, 0, idx,x,y,m,results);
/*
for( int i = 0; i < m; ++i ) {
if ( x[i] != y[idx + i] ) {
results[idx] = 0;
return;
}
}
*/
} else {
results[idx] = 0;
}
}
char* readfile(const char* filename) {
FILE* f;
char* data;
f= fopen(filename, "r");
if ( f != NULL ) {
fseek(f,0,SEEK_END);
int size=ftell(f);
fseek(f,0,SEEK_SET);
data = (char*)malloc((size + 1) * sizeof(char)); // +1 so the buffer can be null-terminated for strlen()
fread(data, size,1,f);
data[size] = '\0';
}
fclose(f);
return data;
}
void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) {
int j = 0;
int limit = n - m;
while (j <= limit ) {
j += shifts[ y[j + m] ];
indx[j] = j;
}
}
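// Editor's worked example (not in the original source): for the pattern "abc"
// (m = 3), processPattern leaves shifts[] at m+1 = 4 for characters absent from
// the pattern and stores the distance from each character's last occurrence to
// the end: shifts['a'] = 3, shifts['b'] = 2, shifts['c'] = 1. precomputeShiftIndx
// then hops through the text by shifts[y[j + m]] -- the character just past the
// current window -- marking only alignments that can possibly start a match, so
// the search kernel rejects every index whose indx[] entry was never set.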
void display_results(int n, int res[]) {
int j=0;
for( int i =0; i < n; ++i )
if ( res[i] == 1 ) {
j++;
// printf("%d\n",i);
}
printf("%d\n",j);
}
int main(int argc, char* argv[]) {
int cuda_device = 0;
size_t n = 0;
size_t m = 0;
if ( argc < 4 ) {
// printf("Usage: ./a.out <device number> <pattern> <data file>\n");
return -1;
}
if( argc > 1 )
cuda_device = atoi( argv[1] );
char* mainString = readfile(argv[3]);
char* subString = (char*) malloc( (strlen(argv[2]) + 1) * sizeof(char) ); // +1 for the '\0' copied by strcpy
strcpy(subString, argv[2]);
n = strlen(mainString);
m = strlen(subString);
int* results=(int*)malloc(n * sizeof(int));
int* l_shifts = (int*)malloc( ASIZE * sizeof(int) );
for( int i = 0; i < ASIZE; ++i )
l_shifts[i] = m + 1;
int* l_indx = (int*) malloc( n * sizeof(int) );
for( int i = 0; i < n; ++i ) {
l_indx[i] = -1;
results[i]=1;
}
l_indx[0]=0;
// hipError_t error;
hipEvent_t start_event, stop_event;
float time1, time2;
checkCudaErrors( hipEventCreate(&start_event) );
checkCudaErrors( hipEventCreate(&stop_event) );
int num_devices=0;
checkCudaErrors( hipGetDeviceCount(&num_devices) );
if(0==num_devices)
{
// printf("Your system does not have a CUDA capable device\n");
return 1;
}
/*
if( cuda_device >= num_devices )
{
if(num_devices==0)
// printf("You have only 1 device and it's id is 0\n");
else
// printf("choose device ID between 0 and %d\n", num_devices-1);
return 1;
}
*/
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, cuda_device) );
// if( (1 == deviceProp.major) && (deviceProp.minor < 1))
// printf("%s does not have compute capability 1.1 or later\n", deviceProp.name);
// printf("Device name : %s\n", deviceProp.name );
// printf("CUDA Capable SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// printf("array_size = %zd\n", n);
char* d_substr = 0;
int* d_shifts = 0;
int* d_indx = 0;
char* d_text = 0;
int* d_results = 0;
checkCudaErrors( hipMalloc((void**)&d_shifts, sizeof(int)*ASIZE));
checkCudaErrors( hipMalloc((void**)&d_indx, n * sizeof(int)) );
checkCudaErrors( hipMalloc((void**)&d_results, n * sizeof(int)) );
checkCudaErrors( hipMalloc((void**)&d_substr, (m)*sizeof(char)) );
checkCudaErrors( hipMalloc((void**)&d_text, (strlen(mainString))*sizeof(char)) );
checkCudaErrors( hipMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy(d_results, results, sizeof(int) * n, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy(d_substr, subString, sizeof(char)*(m), hipMemcpyHostToDevice) );
// error = hipGetLastError();
// printf("%s\n", hipGetErrorString(error));
dim3 threadsPerBlocks(ASIZE, 1);
int t = m / threadsPerBlocks.x;
int t1 = m % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks(t,1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x);
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( processPattern), dim3(numBlocks),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_shifts);
hipDeviceSynchronize();
hipEventRecord(stop_event, 0);
hipEventSynchronize( stop_event );
hipEventElapsedTime( &time1, start_event, stop_event );
checkCudaErrors( hipMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, hipMemcpyDeviceToHost ) );
precomputeShiftIndx(mainString , n, m, l_shifts, l_indx);
checkCudaErrors( hipMemcpy(d_indx, l_indx, n * sizeof(int), hipMemcpyHostToDevice) );
/*
// For debugging
for( int i = 0; i < ASIZE; ++i )
printf("%d\t",l_shifts[i]);
printf("\n\n");
for( int i = 0; i < n; ++i )
printf("%d\t",l_indx[i]);
printf("\n\n");
printf("%zd\t%zd",n,m);
printf("\n\n");
*/
t = n / threadsPerBlocks.x;
t1 = n % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks2(t, 1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x);
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( search), dim3(numBlocks2),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_text, n, d_shifts, d_indx, d_results);
hipDeviceSynchronize();
hipEventRecord(stop_event, 0);
hipEventSynchronize( stop_event );
hipEventElapsedTime( &time2, start_event, stop_event );
hipEventDestroy( start_event );
hipEventDestroy( stop_event );
// printf("%f+%f=%f milliseconds\t",time1, time2, time1+time2);
printf("%f\t",time1+time2);
checkCudaErrors( hipMemcpy(results, d_results, n * sizeof(int), hipMemcpyDeviceToHost) );
display_results(n, results);
hipFree(d_substr);
hipFree(d_shifts);
hipFree(d_indx);
hipFree(d_text);
hipFree(d_results);
// free(mainString);
free(subString);
free(l_indx);
free(l_shifts);
free(results);
hipDeviceReset();
}
| ee52c583a4d4b3f73cc328676275fa2ee7446112.cu | #include <cuda.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <cstring>
#include <iostream>
#include <fstream>
#define ASIZE 256
#define PRIME 1000009
__global__ void processPattern(char* x ,int m, int shifts[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= m ) return;
char c = x[idx];
for( int i = m - 1; i >= idx; --i ) {
if ( x[i] == c ) {
shifts[c] = m - i;
return;
}
}
}
__global__ void compare(int idx,char *x, char *y, int m, int* results) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
// printf("%d\t%d\n",idx,id);
if(x[id]!=y[idx+id]) {
results[idx]=0;
return;
} else {
return;
}
}
__global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx > (n - m) ) { results[idx] = 0; return; }
if ( indx[idx] != idx ) { results[idx] = 0; return; }
if(x[0]==y[idx] && x[m-1]==y[idx+m-1]) {
/*
if(idx>1000 && idx<1100) {
compare<<<1,m>>>(idx);
}
*/
if(m>2)
compare<<<1,m>>>(idx,x,y,m,results);
/*
for( int i = 0; i < m; ++i ) {
if ( x[i] != y[idx + i] ) {
results[idx] = 0;
return;
}
}
*/
} else {
results[idx] = 0;
}
}
char* readfile(const char* filename) {
FILE* f;
char* data;
f= fopen(filename, "r");
if ( f != NULL ) {
fseek(f,0,SEEK_END);
int size=ftell(f);
fseek(f,0,SEEK_SET);
data = (char*)malloc((size + 1) * sizeof(char)); // +1 so the buffer can be null-terminated for strlen()
fread(data, size,1,f);
data[size] = '\0';
}
fclose(f);
return data;
}
void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) {
int j = 0;
int limit = n - m;
while (j <= limit ) {
j += shifts[ y[j + m] ];
indx[j] = j;
}
}
void display_results(int n, int res[]) {
int j=0;
for( int i =0; i < n; ++i )
if ( res[i] == 1 ) {
j++;
// printf("%d\n",i);
}
printf("%d\n",j);
}
int main(int argc, char* argv[]) {
int cuda_device = 0;
size_t n = 0;
size_t m = 0;
if ( argc < 4 ) {
// printf("Usage: ./a.out <device number> <pattern> <data file>\n");
return -1;
}
if( argc > 1 )
cuda_device = atoi( argv[1] );
char* mainString = readfile(argv[3]);
char* subString = (char*) malloc( (strlen(argv[2]) + 1) * sizeof(char) ); // +1 for the '\0' copied by strcpy
strcpy(subString, argv[2]);
n = strlen(mainString);
m = strlen(subString);
int* results=(int*)malloc(n * sizeof(int));
int* l_shifts = (int*)malloc( ASIZE * sizeof(int) );
for( int i = 0; i < ASIZE; ++i )
l_shifts[i] = m + 1;
int* l_indx = (int*) malloc( n * sizeof(int) );
for( int i = 0; i < n; ++i ) {
l_indx[i] = -1;
results[i]=1;
}
l_indx[0]=0;
// cudaError_t error;
cudaEvent_t start_event, stop_event;
float time1, time2;
checkCudaErrors( cudaEventCreate(&start_event) );
checkCudaErrors( cudaEventCreate(&stop_event) );
int num_devices=0;
checkCudaErrors( cudaGetDeviceCount(&num_devices) );
if(0==num_devices)
{
// printf("Your system does not have a CUDA capable device\n");
return 1;
}
/*
if( cuda_device >= num_devices )
{
if(num_devices==0)
// printf("You have only 1 device and it's id is 0\n");
else
// printf("choose device ID between 0 and %d\n", num_devices-1);
return 1;
}
*/
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, cuda_device) );
// if( (1 == deviceProp.major) && (deviceProp.minor < 1))
// printf("%s does not have compute capability 1.1 or later\n", deviceProp.name);
// printf("Device name : %s\n", deviceProp.name );
// printf("CUDA Capable SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// printf("array_size = %zd\n", n);
char* d_substr = 0;
int* d_shifts = 0;
int* d_indx = 0;
char* d_text = 0;
int* d_results = 0;
checkCudaErrors( cudaMalloc((void**)&d_shifts, sizeof(int)*ASIZE));
checkCudaErrors( cudaMalloc((void**)&d_indx, n * sizeof(int)) );
checkCudaErrors( cudaMalloc((void**)&d_results, n * sizeof(int)) );
checkCudaErrors( cudaMalloc((void**)&d_substr, (m)*sizeof(char)) );
checkCudaErrors( cudaMalloc((void**)&d_text, (strlen(mainString))*sizeof(char)) );
checkCudaErrors( cudaMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy(d_results, results, sizeof(int) * n, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy(d_substr, subString, sizeof(char)*(m), cudaMemcpyHostToDevice) );
// error = cudaGetLastError();
// printf("%s\n", cudaGetErrorString(error));
dim3 threadsPerBlocks(ASIZE, 1);
int t = m / threadsPerBlocks.x;
int t1 = m % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks(t,1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x);
cudaEventRecord(start_event, 0);
processPattern<<<numBlocks,threadsPerBlocks>>>(d_substr, m, d_shifts);
cudaThreadSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize( stop_event );
cudaEventElapsedTime( &time1, start_event, stop_event );
checkCudaErrors( cudaMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, cudaMemcpyDeviceToHost ) );
precomputeShiftIndx(mainString , n, m, l_shifts, l_indx);
checkCudaErrors( cudaMemcpy(d_indx, l_indx, n * sizeof(int), cudaMemcpyHostToDevice) );
/*
// For debugging
for( int i = 0; i < ASIZE; ++i )
printf("%d\t",l_shifts[i]);
printf("\n\n");
for( int i = 0; i < n; ++i )
printf("%d\t",l_indx[i]);
printf("\n\n");
printf("%zd\t%zd",n,m);
printf("\n\n");
*/
t = n / threadsPerBlocks.x;
t1 = n % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks2(t, 1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x);
cudaEventRecord(start_event, 0);
search<<<numBlocks2,threadsPerBlocks>>>(d_substr, m, d_text, n, d_shifts, d_indx, d_results);
cudaThreadSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize( stop_event );
cudaEventElapsedTime( &time2, start_event, stop_event );
cudaEventDestroy( start_event );
cudaEventDestroy( stop_event );
// printf("%f+%f=%f milliseconds\t",time1, time2, time1+time2);
printf("%f\t",time1+time2);
checkCudaErrors( cudaMemcpy(results, d_results, n * sizeof(int), cudaMemcpyDeviceToHost) );
display_results(n, results);
cudaFree(d_substr);
cudaFree(d_shifts);
cudaFree(d_indx);
cudaFree(d_text);
cudaFree(d_results);
// free(mainString);
free(subString);
free(l_indx);
free(l_shifts);
free(results);
cudaThreadExit();
}
|
e6dd5cb069837ef1059eeb0f422200ddd50c7e5c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
//CUDA V 10.2
#include <math.h>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "device_launch_parameters.h"
#include "rocblas.h"
extern "C"
int testCUDACPP(int a,int b) {
int c = a + b;
return c;
}
__global__ void EUGPU_square(float *desGPU1MinusDesGPU2, float *desGPU1MinusDesGPU2Square)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
desGPU1MinusDesGPU2Square[i] = desGPU1MinusDesGPU2[i] * desGPU1MinusDesGPU2[i];
}
__global__ void EUGPU_minus(float *desGPU1, float *desGPU2, float *desGPU1MinusDesGPU2)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
desGPU1MinusDesGPU2[i] = desGPU1[i] - desGPU2[i];
}
extern "C"
float computeEuclideanDistance
(
const unsigned char * descriptionData2,
const unsigned char * descriptionData1,
size_t size
)
{
float * descriptionDataFloat1 = (float *)malloc(sizeof(float) * size);
float * descriptionDataFloat2 = (float *)malloc(sizeof(float) * size);
for (int i = 0; i < size; i++) {
descriptionDataFloat1[i] = static_cast<float>(descriptionData1[i]);
descriptionDataFloat2[i] = static_cast<float>(descriptionData2[i]);
}
float *desGPU1;
float *desGPU2;
hipMalloc((void**)&desGPU1, sizeof(float) * size);
hipMalloc((void**)&desGPU2, sizeof(float) * size);
hipMemcpy(desGPU1, descriptionDataFloat1, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(desGPU2, descriptionDataFloat2, sizeof(float) * size, hipMemcpyHostToDevice);
/*dim3 dimBlock(size);
dim3 dimGrid(1);*/
dim3 dimBlock(1024);
dim3 dimGrid(256);
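	// note: the kernels below contain no bounds check, so this fixed launch of
	// 256 blocks x 1024 threads assumes 'size' matches the total thread count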
float *resultGPU;
hipMalloc((void**)&resultGPU, sizeof(float));
float *desGPU1MinusDesGPU2;
hipMalloc((void **)&desGPU1MinusDesGPU2, sizeof(float) * size);
float *desGPU1MinusDesGPU2Square;
hipMalloc((void **)&desGPU1MinusDesGPU2Square, sizeof(float) * size);
//kernel
hipDeviceSynchronize();
EUGPU_minus << <dimGrid, dimBlock >> > (desGPU1, desGPU2, desGPU1MinusDesGPU2);
hipDeviceSynchronize();
EUGPU_square << <dimGrid, dimBlock >> > (desGPU1MinusDesGPU2, desGPU1MinusDesGPU2Square);
float *desGPU1MinusDesGPU2SquareCPU = (float *)malloc(sizeof(float) * size);
hipMemcpy(desGPU1MinusDesGPU2SquareCPU, desGPU1MinusDesGPU2Square, sizeof(float) * size, hipMemcpyDeviceToHost);
float result = 0.0;
for (int i = 0; i < size; i++) {
result += desGPU1MinusDesGPU2SquareCPU[i];
}
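	// 'result' holds the sum of squared differences (the squared Euclidean
	// distance); no square root is applied before returning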
free(desGPU1MinusDesGPU2SquareCPU);
hipFree(resultGPU);
hipFree(desGPU1MinusDesGPU2);
hipFree(desGPU1MinusDesGPU2Square);
hipFree(desGPU1);
hipFree(desGPU2);
free(descriptionDataFloat1);
free(descriptionDataFloat2);
return result;
}
| e6dd5cb069837ef1059eeb0f422200ddd50c7e5c.cu | #include <iostream>
//CUDA V 10.2
#include <math.h>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include "cublas_v2.h"
extern "C"
int testCUDACPP(int a,int b) {
int c = a + b;
return c;
}
__global__ void EUGPU_square(float *desGPU1MinusDesGPU2, float *desGPU1MinusDesGPU2Square)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
desGPU1MinusDesGPU2Square[i] = desGPU1MinusDesGPU2[i] * desGPU1MinusDesGPU2[i];
}
__global__ void EUGPU_minus(float *desGPU1, float *desGPU2, float *desGPU1MinusDesGPU2)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
desGPU1MinusDesGPU2[i] = desGPU1[i] - desGPU2[i];
}
extern "C"
float computeEuclideanDistance
(
const unsigned char * descriptionData2,
const unsigned char * descriptionData1,
size_t size
)
{
float * descriptionDataFloat1 = (float *)malloc(sizeof(float) * size);
float * descriptionDataFloat2 = (float *)malloc(sizeof(float) * size);
for (int i = 0; i < size; i++) {
descriptionDataFloat1[i] = static_cast<float>(descriptionData1[i]);
descriptionDataFloat2[i] = static_cast<float>(descriptionData2[i]);
}
float *desGPU1;
float *desGPU2;
cudaMalloc((void**)&desGPU1, sizeof(float) * size);
cudaMalloc((void**)&desGPU2, sizeof(float) * size);
cudaMemcpy(desGPU1, descriptionDataFloat1, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(desGPU2, descriptionDataFloat2, sizeof(float) * size, cudaMemcpyHostToDevice);
/*dim3 dimBlock(size);
dim3 dimGrid(1);*/
dim3 dimBlock(1024);
dim3 dimGrid(256);
float *resultGPU;
cudaMalloc((void**)&resultGPU, sizeof(float));
float *desGPU1MinusDesGPU2;
cudaMalloc((void **)&desGPU1MinusDesGPU2, sizeof(float) * size);
float *desGPU1MinusDesGPU2Square;
cudaMalloc((void **)&desGPU1MinusDesGPU2Square, sizeof(float) * size);
	// run the kernels
cudaThreadSynchronize();
EUGPU_minus << <dimGrid, dimBlock >> > (desGPU1, desGPU2, desGPU1MinusDesGPU2);
cudaThreadSynchronize();
EUGPU_square << <dimGrid, dimBlock >> > (desGPU1MinusDesGPU2, desGPU1MinusDesGPU2Square);
float *desGPU1MinusDesGPU2SquareCPU = (float *)malloc(sizeof(float) * size);
cudaMemcpy(desGPU1MinusDesGPU2SquareCPU, desGPU1MinusDesGPU2Square, sizeof(float) * size, cudaMemcpyDeviceToHost);
float result = 0.0;
for (int i = 0; i < size; i++) {
result += desGPU1MinusDesGPU2SquareCPU[i];
}
free(desGPU1MinusDesGPU2SquareCPU);
cudaFree(resultGPU);
cudaFree(desGPU1MinusDesGPU2);
cudaFree(desGPU1MinusDesGPU2Square);
cudaFree(desGPU1);
cudaFree(desGPU2);
free(descriptionDataFloat1);
free(descriptionDataFloat2);
return result;
}
|
0b6001f553fe1b1d760b1b83aef9a0f8e94bec24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
const int M,
const float* bounds,
const float* X,
int32_t* out) {
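  // grid-stride loop: for each input element, binary-search the sorted 'bounds'
  // so that 'high' ends as the index of the first boundary >= X[i] (or M if none)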
CUDA_1D_KERNEL_LOOP(i, N) {
int32_t low = -1, high = M;
while (high - low > 1) {
const int32_t median = low + (high - low) / 2;
if (bounds[median] < X[i]) {
low = median;
} else {
high = median;
}
}
out[i] = high;
}
}
template <>
bool BucketizeOp<CUDAContext>::RunOnDevice() {
auto& input = Input(X);
CAFFE_ENFORCE_GE(input.dim(), 1);
auto N = input.numel();
auto* output = Output(INDICES, input.sizes(), at::dtype<int32_t>());
const auto* input_data = input.template data<float>();
auto* output_data = output->template mutable_data<int32_t>();
hipLaunchKernelGGL(( BucketizeOpKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
boundaries_device_.numel(),
boundaries_device_.data<float>(),
input_data,
output_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
};
REGISTER_CUDA_OPERATOR(Bucketize, BucketizeOp<CUDAContext>);
} // namespace caffe2
using BucketizeCUDA = caffe2::BucketizeOp<caffe2::CUDAContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(
Bucketize,
BucketizeCUDA);
| 0b6001f553fe1b1d760b1b83aef9a0f8e94bec24.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
const int M,
const float* bounds,
const float* X,
int32_t* out) {
CUDA_1D_KERNEL_LOOP(i, N) {
int32_t low = -1, high = M;
while (high - low > 1) {
const int32_t median = low + (high - low) / 2;
if (bounds[median] < X[i]) {
low = median;
} else {
high = median;
}
}
out[i] = high;
}
}
template <>
bool BucketizeOp<CUDAContext>::RunOnDevice() {
auto& input = Input(X);
CAFFE_ENFORCE_GE(input.dim(), 1);
auto N = input.numel();
auto* output = Output(INDICES, input.sizes(), at::dtype<int32_t>());
const auto* input_data = input.template data<float>();
auto* output_data = output->template mutable_data<int32_t>();
BucketizeOpKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
boundaries_device_.numel(),
boundaries_device_.data<float>(),
input_data,
output_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
};
REGISTER_CUDA_OPERATOR(Bucketize, BucketizeOp<CUDAContext>);
} // namespace caffe2
using BucketizeCUDA = caffe2::BucketizeOp<caffe2::CUDAContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(
Bucketize,
BucketizeCUDA);
|
5d1ba834ce50b1ae6120d342f5c09bf24788ec04.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SHelloWorld.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
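			// iXSIZE and iYSIZE are now rounded up to multiples of the block
			// dimensions so the launched grid covers the whole matrix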
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
SHelloWorld), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
SHelloWorld), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
SHelloWorld), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5d1ba834ce50b1ae6120d342f5c09bf24788ec04.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SHelloWorld.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
SHelloWorld<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
SHelloWorld<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
SHelloWorld<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
466cf72515232d12a30f758df4bf8ca564550a89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust\device_vector.h>
#include <thrust/extrema.h>
#include <thrust/system/hip/execution_policy.h>
struct subject
{
float fitness;
int index;
};
extern "C"
{
__declspec(dllexport)
int Sum(void* p, int size) {
int* toSum = (int*)p;
thrust::device_ptr<int> d(toSum);
return thrust::reduce(d, d + size);
}
__declspec(dllexport)
float Avragei(int* p, int size) {
thrust::device_ptr<int> devicePtr(p);
return thrust::reduce(devicePtr, devicePtr + size)/(float)size;
}
__declspec(dllexport)
float Avragef(float* p, int size) {
thrust::device_ptr<float> devicePtr(p);
return thrust::reduce(devicePtr, devicePtr + size) / (float)size;
}
__declspec(dllexport)
float Maxf(float* p, int size) {
thrust::device_ptr<float> devicePtr(p);
return *thrust::max_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
int Maxi(int* p, int size) {
thrust::device_ptr<int> devicePtr(p);
return *thrust::max_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
	float Minf(float* p, int size) {
thrust::device_ptr<float> devicePtr(p);
return *thrust::min_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
int Mini(int* p, int size) {
thrust::device_ptr<int> devicePtr(p);
return *thrust::min_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
void sort_by_key(float* keys,int* values,int size) {
thrust::device_ptr<float> deviceKeys(keys);
thrust::device_ptr<int> deviceValues(values);
thrust::sort_by_key(deviceKeys, deviceKeys + size, deviceValues,thrust::less<float>());
}
__declspec(dllexport)
void sort_by_keyDesc(float* keys, int* values, int size) {
thrust::device_ptr<float> deviceKeys(keys);
thrust::device_ptr<int> deviceValues(values);
thrust::sort_by_key(deviceKeys, deviceKeys + size, deviceValues, thrust::greater<float>());
}
__declspec(dllexport)
void sequence(int* values,int size) {
thrust::device_ptr<int> deviceValues(values);
thrust::sequence(deviceValues, deviceValues + size);
}
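	// sorts each of the 'row' segments of length 'col' independently (keys
	// ascending), one CUDA stream per segment so the per-row sorts can overlap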
__declspec(dllexport)
void sort_by_key_multiple(float* keys, int* values,int col,int row) {
thrust::device_ptr<float> deviceKeys(keys);
thrust::device_ptr<int> deviceValues(values);
std::vector<hipStream_t> streams;
streams.resize(row);
for (int i = 0; i < row; i++)
{
hipStreamCreate(&streams[i]);
}
for (int i = 0; i < row; i++)
{
const auto currentKeys = deviceKeys + i*col;
const auto currentValues = deviceValues+ i*col;
thrust::sort_by_key(thrust::hip::par.on(streams[i]), currentKeys, currentKeys + col, currentValues);
}
for (auto& stream : streams) {
hipStreamSynchronize(stream);
}
for (auto& stream : streams) {
hipStreamDestroy(stream);
}
}
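	// returns the best subject, assuming 'fitnesses' and 'sortedIndeces' were
	// sorted in ascending order so the fittest entry sits at index size-1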
__declspec(dllexport)
subject FindFitest(
float* fitnesses,
int* sortedIndeces,
int size
) {
subject s;
hipMemcpy(&s.index, sortedIndeces+size-1, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&s.fitness, fitnesses+size-1, sizeof(float), hipMemcpyDeviceToHost);
return s;
}
} | 466cf72515232d12a30f758df4bf8ca564550a89.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust\device_vector.h>
#include <thrust/extrema.h>
#include <thrust/system/cuda/execution_policy.h>
struct subject
{
float fitness;
int index;
};
extern "C"
{
__declspec(dllexport)
int Sum(void* p, int size) {
int* toSum = (int*)p;
thrust::device_ptr<int> d(toSum);
return thrust::reduce(d, d + size);
}
__declspec(dllexport)
float Avragei(int* p, int size) {
thrust::device_ptr<int> devicePtr(p);
return thrust::reduce(devicePtr, devicePtr + size)/(float)size;
}
__declspec(dllexport)
float Avragef(float* p, int size) {
thrust::device_ptr<float> devicePtr(p);
return thrust::reduce(devicePtr, devicePtr + size) / (float)size;
}
__declspec(dllexport)
float Maxf(float* p, int size) {
thrust::device_ptr<float> devicePtr(p);
return *thrust::max_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
int Maxi(int* p, int size) {
thrust::device_ptr<int> devicePtr(p);
return *thrust::max_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
	float Minf(float* p, int size) {
thrust::device_ptr<float> devicePtr(p);
return *thrust::min_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
int Mini(int* p, int size) {
thrust::device_ptr<int> devicePtr(p);
return *thrust::min_element(devicePtr, devicePtr + size);
}
__declspec(dllexport)
void sort_by_key(float* keys,int* values,int size) {
thrust::device_ptr<float> deviceKeys(keys);
thrust::device_ptr<int> deviceValues(values);
thrust::sort_by_key(deviceKeys, deviceKeys + size, deviceValues,thrust::less<float>());
}
__declspec(dllexport)
void sort_by_keyDesc(float* keys, int* values, int size) {
thrust::device_ptr<float> deviceKeys(keys);
thrust::device_ptr<int> deviceValues(values);
thrust::sort_by_key(deviceKeys, deviceKeys + size, deviceValues, thrust::greater<float>());
}
__declspec(dllexport)
void sequence(int* values,int size) {
thrust::device_ptr<int> deviceValues(values);
thrust::sequence(deviceValues, deviceValues + size);
}
__declspec(dllexport)
void sort_by_key_multiple(float* keys, int* values,int col,int row) {
thrust::device_ptr<float> deviceKeys(keys);
thrust::device_ptr<int> deviceValues(values);
std::vector<cudaStream_t> streams;
streams.resize(row);
for (int i = 0; i < row; i++)
{
cudaStreamCreate(&streams[i]);
}
for (int i = 0; i < row; i++)
{
const auto currentKeys = deviceKeys + i*col;
const auto currentValues = deviceValues+ i*col;
thrust::sort_by_key(thrust::cuda::par.on(streams[i]), currentKeys, currentKeys + col, currentValues);
}
for (auto& stream : streams) {
cudaStreamSynchronize(stream);
}
for (auto& stream : streams) {
cudaStreamDestroy(stream);
}
}
__declspec(dllexport)
subject FindFitest(
float* fitnesses,
int* sortedIndeces,
int size
) {
subject s;
cudaMemcpy(&s.index, sortedIndeces+size-1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&s.fitness, fitnesses+size-1, sizeof(float), cudaMemcpyDeviceToHost);
return s;
}
} |
47cd49327384465252d8790bf41f17cce8edb490.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
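// returns floor(log2(m)) by counting how many times m can be halved down to 1
// (assumes m >= 1; m == 0 would never terminate)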
__device__ int _logarithm_2(int m)
{
int count = 0;
while (m != 1)
{
m = m/2;
count++;
}
return count;
}
__global__ void kernel_compute_gpu(int n, int nb_iters, int *T)
{
int iter;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
for(iter=0; iter<nb_iters; iter++)
T[id] = _logarithm_2(T[id]*T[id]+T[id]+iter);
}
| 47cd49327384465252d8790bf41f17cce8edb490.cu | __device__ int _logarithm_2(int m)
{
int count = 0;
while (m != 1)
{
m = m/2;
count++;
}
return count;
}
__global__ void kernel_compute_gpu(int n, int nb_iters, int *T)
{
int iter;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
for(iter=0; iter<nb_iters; iter++)
T[id] = _logarithm_2(T[id]*T[id]+T[id]+iter);
}
|
accab4736bca13927aff6623155b6ce7ebcfd7de.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| accab4736bca13927aff6623155b6ce7ebcfd7de.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
476d82f48040603fe8bf8ea6ad8d939b97687238.hip | // !!! This is a file automatically generated by hipify!!!
/*
** Code to implement a d2q9-bgk lattice boltzmann scheme.
** 'd2' indicates a 2-dimensional grid, and
** 'q9' indicates 9 velocities per grid cell.
** 'bgk' refers to the Bhatnagar-Gross-Krook collision step.
**
** The 'speeds' in each cell are numbered as follows:
**
** 6 2 5
** \|/
** 3-0-1
** /|\
** 7 4 8
**
** A 2D grid:
**
** cols
** --- --- ---
** | D | E | F |
** rows --- --- ---
** | A | B | C |
** --- --- ---
**
** 'unwrapped' in row major order to give a 1D array:
**
** --- --- --- --- --- ---
** | A | B | C | D | E | F |
** --- --- --- --- --- ---
**
** Grid indices are:
**
** ny
** ^ cols(ii)
** | ----- ----- -----
** | | ... | ... | etc |
** | ----- ----- -----
** rows(jj) | | 1,0 | 1,1 | 1,2 |
** | ----- ----- -----
** | | 0,0 | 0,1 | 0,2 |
** | ----- ----- -----
** ----------------------> nx
**
** Note the names of the input parameter and obstacle files
** are passed on the command line, e.g.:
**
** ./d2q9-bgk input.params obstacles.dat
**
** Be sure to adjust the grid dimensions in the parameter file
** if you choose a different obstacle file.
*/
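/*
** Worked example of the indexing convention above (an illustrative note, not
** part of the original solver): a cell at column ii and row jj on an nx-wide
** grid lives at flat index ii + jj*nx, and its kk-th speed is
** cells[ii + jj*nx].speeds[kk]. With nx = 3, cell (ii=2, jj=1) -- "F" in the
** diagram -- is element 2 + 1*3 = 5 of the unwrapped 1D array.
*/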
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define WARMUPS 1000
#define NSPEEDS 9
#define LOCALSIZEX 128
#define LOCALSIZEY 1
/* dump output files for verification */
#define FINALSTATEFILE "final_state.dat"
#define AVVELSFILE "av_vels.dat"
/* struct to hold the parameter values */
typedef struct
{
int nx; /* no. of cells in x-direction */
int ny; /* no. of cells in y-direction */
int maxIters; /* no. of iterations */
int reynolds_dim; /* dimension for Reynolds number */
float density; /* density per link */
float accel; /* density redistribution */
float omega; /* relaxation parameter */
} t_param;
/* struct to hold the 'speed' values */
typedef struct
{
float speeds[NSPEEDS];
} t_speed;
/*
** function prototypes
*/
/* load params, allocate memory, load obstacles & initialise fluid particle densities */
int initialise(const char* paramfile, const char* obstaclefile,
t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr);
/*
** The main calculation methods.
** timestep calls, in order, the functions:
** accelerate_flow(), propagate(), rebound() & collision()
*/
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels);
/* finalise, including freeing up allocated memory */
int finalise(t_speed* cells_ptr, t_speed* tmp_cells_ptr,
int* obstacles_ptr, float* av_vels_ptr);
/* Sum all the densities in the grid.
** The total should remain constant from one timestep to the next. */
float total_density(const t_param params, t_speed* cells);
/* compute average velocity */
float av_velocity(const t_param params, t_speed* cells, int* obstacles);
/* calculate Reynolds number */
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles);
/* utility functions */
void die(const char* message, const int line, const char* file);
void usage(const char* exe);
__device__ bool
isGreater(const float x, const float y)
{
return x > y ? 1 : 0;
}
__global__ void d2q9_bgk(
const float* __restrict__ Speed0A,
const float* __restrict__ Speed1A,
const float* __restrict__ Speed2A,
const float* __restrict__ Speed3A,
const float* __restrict__ Speed4A,
const float* __restrict__ Speed5A,
const float* __restrict__ Speed6A,
const float* __restrict__ Speed7A,
const float* __restrict__ Speed8A,
float* __restrict__ Tmp0A,
float* __restrict__ Tmp1A,
float* __restrict__ Tmp2A,
float* __restrict__ Tmp3A,
float* __restrict__ Tmp4A,
float* __restrict__ Tmp5A,
float* __restrict__ Tmp6A,
float* __restrict__ Tmp7A,
float* __restrict__ Tmp8A,
const int* __restrict__ ObstaclesA,
float* __restrict__ Partial_Sum,
int* __restrict__ Partial_Sum2,
const float densityaccel,
const float omega,
const int nx,
const int ny,
const int tt)
{
//setup local memory
__shared__ int local_sum2[LOCALSIZEX*LOCALSIZEY];
__shared__ float local_sum[LOCALSIZEX*LOCALSIZEY];
/* get column and row indices */
const int ii = blockIdx.x * blockDim.x + threadIdx.x;
const int jj = blockIdx.y * blockDim.y + threadIdx.y;
const float c_sq_inv = 3.f;
const float c_sq = 1.f/c_sq_inv; /* square of speed of sound */
const float temp1 = 4.5f;
const float w1 = 1.f/9.f;
const float w0 = 4.f * w1; /* weighting factor */
const float w2 = 1.f/36.f; /* weighting factor */
const float w11 = densityaccel * w1;
const float w21 = densityaccel * w2;
/* determine indices of axis-direction neighbours
** respecting periodic boundary conditions (wrap around) */
const int y_n = (jj + 1) % ny;
const int x_e = (ii + 1) % nx;
const int y_s = (jj == 0) ? (jj + ny - 1) : (jj - 1);
const int x_w = (ii == 0) ? (ii + nx - 1) : (ii - 1);
/* propagate densities from neighbouring cells, following
** appropriate directions of travel and writing into
** scratch space grid */
float tmp_s0 = Speed0A[ii + jj*nx];
float tmp_s1 = (jj == ny-2 && (!ObstaclesA[x_w + jj*nx] && isGreater((Speed3A[x_w + jj*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + jj*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + jj*nx] - w21) , 0.f))) ? Speed1A[x_w + jj*nx]+w11 : Speed1A[x_w + jj*nx];
float tmp_s2 = Speed2A[ii + y_s*nx];
float tmp_s3 = (jj == ny-2 && (!ObstaclesA[x_e + jj*nx] && isGreater((Speed3A[x_e + jj*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + jj*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + jj*nx] - w21) , 0.f))) ? Speed3A[x_e + jj*nx]-w11 : Speed3A[x_e + jj*nx];
float tmp_s4 = Speed4A[ii + y_n*nx];
float tmp_s5 = (y_s == ny-2 && (!ObstaclesA[x_w + y_s*nx] && isGreater((Speed3A[x_w + y_s*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + y_s*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + y_s*nx] - w21) , 0.f))) ? Speed5A[x_w + y_s*nx]+w21 : Speed5A[x_w + y_s*nx];
float tmp_s6 = (y_s == ny-2 && (!ObstaclesA[x_e + y_s*nx] && isGreater((Speed3A[x_e + y_s*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + y_s*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + y_s*nx] - w21) , 0.f))) ? Speed6A[x_e + y_s*nx]-w21 : Speed6A[x_e + y_s*nx];
float tmp_s7 = (y_n == ny-2 && (!ObstaclesA[x_e + y_n*nx] && isGreater((Speed3A[x_e + y_n*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + y_n*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + y_n*nx] - w21) , 0.f))) ? Speed7A[x_e + y_n*nx]-w21 : Speed7A[x_e + y_n*nx];
float tmp_s8 = (y_n == ny-2 && (!ObstaclesA[x_w + y_n*nx] && isGreater((Speed3A[x_w + y_n*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + y_n*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + y_n*nx] - w21) , 0.f))) ? Speed8A[x_w + y_n*nx]+w21 : Speed8A[x_w + y_n*nx];
/* compute local density total */
float local_density = tmp_s0 + tmp_s1 + tmp_s2 + tmp_s3 + tmp_s4 + tmp_s5 + tmp_s6 + tmp_s7 + tmp_s8;
const float local_density_recip = 1.f/(local_density);
/* compute x velocity component */
float u_x = (tmp_s1
+ tmp_s5
+ tmp_s8
- tmp_s3
- tmp_s6
- tmp_s7)
* local_density_recip;
/* compute y velocity component */
float u_y = (tmp_s2
+ tmp_s5
+ tmp_s6
- tmp_s4
- tmp_s8
- tmp_s7)
* local_density_recip;
/* velocity squared */
const float temp2 = - (u_x * u_x + u_y * u_y)/(2.f * c_sq);
/* equilibrium densities */
float d_equ[NSPEEDS];
/* zero velocity density: weight w0 */
d_equ[0] = w0 * local_density
* (1.f + temp2);
/* axis speeds: weight w1 */
d_equ[1] = w1 * local_density * (1.f + u_x * c_sq_inv
+ (u_x * u_x) * temp1
+ temp2);
d_equ[2] = w1 * local_density * (1.f + u_y * c_sq_inv
+ (u_y * u_y) * temp1
+ temp2);
d_equ[3] = w1 * local_density * (1.f - u_x * c_sq_inv
+ (u_x * u_x) * temp1
+ temp2);
d_equ[4] = w1 * local_density * (1.f - u_y * c_sq_inv
+ (u_y * u_y) * temp1
+ temp2);
/* diagonal speeds: weight w2 */
d_equ[5] = w2 * local_density * (1.f + (u_x + u_y) * c_sq_inv
+ ((u_x + u_y) * (u_x + u_y)) * temp1
+ temp2);
d_equ[6] = w2 * local_density * (1.f + (-u_x + u_y) * c_sq_inv
+ ((-u_x + u_y) * (-u_x + u_y)) * temp1
+ temp2);
d_equ[7] = w2 * local_density * (1.f + (-u_x - u_y) * c_sq_inv
+ ((-u_x - u_y) * (-u_x - u_y)) * temp1
+ temp2);
d_equ[8] = w2 * local_density * (1.f + (u_x - u_y) * c_sq_inv
+ ((u_x - u_y) * (u_x - u_y)) * temp1
+ temp2);
float tmp;
int expression = ObstaclesA[ii + jj*nx];
tmp_s0 = expression ? tmp_s0 : (tmp_s0 + omega * (d_equ[0] - tmp_s0));
tmp = tmp_s1;
tmp_s1 = expression ? tmp_s3 : (tmp_s1 + omega * (d_equ[1] - tmp_s1));
tmp_s3 = expression ? tmp : (tmp_s3 + omega * (d_equ[3] - tmp_s3));
tmp = tmp_s2;
tmp_s2 = expression ? tmp_s4 : (tmp_s2 + omega * (d_equ[2] - tmp_s2));
tmp_s4 = expression ? tmp : (tmp_s4 + omega * (d_equ[4] - tmp_s4));
tmp = tmp_s5;
tmp_s5 = expression ? tmp_s7 : (tmp_s5 + omega * (d_equ[5] - tmp_s5));
tmp_s7 = expression ? tmp : (tmp_s7 + omega * (d_equ[7] - tmp_s7));
tmp = tmp_s6;
tmp_s6 = expression ? tmp_s8 : (tmp_s6 + omega * (d_equ[6] - tmp_s6));
tmp_s8 = expression ? tmp : (tmp_s8 + omega * (d_equ[8] - tmp_s8));
/* local density total */
local_density = 1.f/(tmp_s0 + tmp_s1 + tmp_s2 + tmp_s3 + tmp_s4 + tmp_s5 + tmp_s6 + tmp_s7 + tmp_s8);
/* x-component of velocity */
u_x = (tmp_s1
+ tmp_s5
+ tmp_s8
- tmp_s3
- tmp_s6
- tmp_s7)
* local_density;
/* compute y velocity component */
u_y = (tmp_s2
+ tmp_s5
+ tmp_s6
- tmp_s4
- tmp_s7
- tmp_s8)
* local_density;
Tmp0A[ii + jj*nx] = tmp_s0;
Tmp1A[ii + jj*nx] = tmp_s1;
Tmp2A[ii + jj*nx] = tmp_s2;
Tmp3A[ii + jj*nx] = tmp_s3;
Tmp4A[ii + jj*nx] = tmp_s4;
Tmp5A[ii + jj*nx] = tmp_s5;
Tmp6A[ii + jj*nx] = tmp_s6;
Tmp7A[ii + jj*nx] = tmp_s7;
Tmp8A[ii + jj*nx] = tmp_s8;
int local_idi = threadIdx.x;
int local_idj = threadIdx.y;
int local_sizei = blockDim.x;
int local_sizej = blockDim.y;
/* accumulate the norm of x- and y- velocity components */
local_sum[local_idi + local_idj*local_sizei] = (ObstaclesA[ii + jj*nx]) ? 0 : hypotf(u_x,u_y);
/* increase counter of inspected cells */
local_sum2[local_idi + local_idj*local_sizei] = (ObstaclesA[ii + jj*nx]) ? 0 : 1 ;
__syncthreads();
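  // simple two-stage reduction: thread (0,0) of each block serially sums the
  // per-thread contributions; the reduction across blocks is finished on the host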
int group_id = blockIdx.x;
int group_id2 = blockIdx.y;
int group_size = gridDim.x;
int group_size2 = gridDim.y;
if(local_idi == 0 && local_idj == 0){
float sum = 0.0f;
int sum2 = 0;
for(int i = 0; i<local_sizei*local_sizej; i++){
sum += local_sum[i];
sum2 += local_sum2[i];
}
Partial_Sum[group_id+group_id2*group_size+tt*group_size*group_size2] = sum;
Partial_Sum2[group_id+group_id2*group_size+tt*group_size*group_size2] = sum2;
}
}
int main(int argc, char* argv[])
{
char* paramfile = NULL; /* input parameter file */
char* obstaclefile = NULL; /* input obstacle file */
t_param params; /* struct to hold parameter values */
t_speed* cells = NULL; /* grid containing fluid densities */
t_speed* tmp_cells = NULL; /* scratch space */
int* obstaclesHost = NULL;/* grid indicating which cells are blocked */
float* av_vels = NULL; /* a record of the av. velocity computed for each timestep */
struct timeval timstr; /* structure to hold elapsed time */
double tic, toc; /* floating point numbers to calculate elapsed wallclock time */
/* parse the command line */
if (argc != 3)
{
usage(argv[0]);
}
else
{
paramfile = argv[1];
obstaclefile = argv[2];
}
/* initialise our data structures and load values from file */
initialise(paramfile, obstaclefile, ¶ms, &cells,
&tmp_cells, &obstaclesHost, &av_vels);
// declare host arrays
int Ny = params.ny;
int Nx = params.nx;
int MaxIters = params.maxIters;
float *speedsHostS0 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS1 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS2 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS3 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS4 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS5 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS6 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS7 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS8 = (float*) malloc (sizeof(float)*Ny*Nx);
float *tot_up = (float*) malloc (sizeof(float) * (Ny/LOCALSIZEY) * (Nx/LOCALSIZEX) * MaxIters);
int *tot_cellsp = (int*) malloc (sizeof(int) * (Ny/LOCALSIZEY) * (Nx/LOCALSIZEX) * MaxIters);
// Init arrays
/* loop over _all_ cells */
for (int jj = 0; jj < Ny; jj++)
{
for (int ii = 0; ii < Nx; ii++)
{
speedsHostS0[ii + jj*Nx] = cells[ii + jj*Nx].speeds[0];
speedsHostS1[ii + jj*Nx] = cells[ii + jj*Nx].speeds[1];
speedsHostS2[ii + jj*Nx] = cells[ii + jj*Nx].speeds[2];
speedsHostS3[ii + jj*Nx] = cells[ii + jj*Nx].speeds[3];
speedsHostS4[ii + jj*Nx] = cells[ii + jj*Nx].speeds[4];
speedsHostS5[ii + jj*Nx] = cells[ii + jj*Nx].speeds[5];
speedsHostS6[ii + jj*Nx] = cells[ii + jj*Nx].speeds[6];
speedsHostS7[ii + jj*Nx] = cells[ii + jj*Nx].speeds[7];
speedsHostS8[ii + jj*Nx] = cells[ii + jj*Nx].speeds[8];
}
}
// Creating buffers which are bound to host arrays
float *speeds0, *speeds1, *speeds2, *speeds3, *speeds4,
*speeds5, *speeds6, *speeds7, *speeds8;
float *tmp_speeds0, *tmp_speeds1, *tmp_speeds2, *tmp_speeds3, *tmp_speeds4,
*tmp_speeds5, *tmp_speeds6, *tmp_speeds7, *tmp_speeds8;
hipMalloc((void**)&speeds0, sizeof(float)*Ny*Nx);
hipMemcpy(speeds0, speedsHostS0, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds1, sizeof(float)*Ny*Nx);
hipMemcpy(speeds1, speedsHostS1, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds2, sizeof(float)*Ny*Nx);
hipMemcpy(speeds2, speedsHostS2, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds3, sizeof(float)*Ny*Nx);
hipMemcpy(speeds3, speedsHostS3, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds4, sizeof(float)*Ny*Nx);
hipMemcpy(speeds4, speedsHostS4, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds5, sizeof(float)*Ny*Nx);
hipMemcpy(speeds5, speedsHostS5, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds6, sizeof(float)*Ny*Nx);
hipMemcpy(speeds6, speedsHostS6, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds7, sizeof(float)*Ny*Nx);
hipMemcpy(speeds7, speedsHostS7, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&speeds8, sizeof(float)*Ny*Nx);
hipMemcpy(speeds8, speedsHostS8, sizeof(float)*Ny*Nx, hipMemcpyHostToDevice);
hipMalloc((void**)&tmp_speeds0, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds1, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds2, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds3, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds4, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds5, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds6, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds7, sizeof(float)*Ny*Nx);
hipMalloc((void**)&tmp_speeds8, sizeof(float)*Ny*Nx);
int *obstacles, *partial_sum2;
float *partial_sum;
hipMalloc((void**)&obstacles, sizeof(int)*Ny*Nx);
hipMalloc((void**)&partial_sum, sizeof(float)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters);
hipMalloc((void**)&partial_sum2, sizeof(int)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters);
hipMemcpy(obstacles, obstaclesHost, sizeof(int)*Ny*Nx, hipMemcpyHostToDevice);
// parameters for kernel
float omega = params.omega;
float densityaccel = params.density*params.accel;
dim3 grids(Nx/LOCALSIZEX, Ny/LOCALSIZEY);
dim3 threads(LOCALSIZEX, LOCALSIZEY);
for (int tt = 0; tt < MaxIters; tt++){
if (tt == WARMUPS - 1) {
//start timer after warmup
hipDeviceSynchronize();
gettimeofday(&timstr, NULL);
tic = timstr.tv_sec * 1e6 + timstr.tv_usec;
}
hipLaunchKernelGGL(( d2q9_bgk), dim3(grids), dim3(threads), 0, 0,
speeds0,
speeds1,
speeds2,
speeds3,
speeds4,
speeds5,
speeds6,
speeds7,
speeds8,
tmp_speeds0,
tmp_speeds1,
tmp_speeds2,
tmp_speeds3,
tmp_speeds4,
tmp_speeds5,
tmp_speeds6,
tmp_speeds7,
tmp_speeds8,
obstacles,
partial_sum,
partial_sum2,
densityaccel,
omega,
Nx,
Ny,
tt );
// swap the buffers
float* speed_tmp = speeds0;
speeds0 = tmp_speeds0;
tmp_speeds0 = speed_tmp;
speed_tmp = speeds1;
speeds1 = tmp_speeds1;
tmp_speeds1 = speed_tmp;
speed_tmp = speeds2;
speeds2 = tmp_speeds2;
tmp_speeds2 = speed_tmp;
speed_tmp = speeds3;
speeds3 = tmp_speeds3;
tmp_speeds3 = speed_tmp;
speed_tmp = speeds4;
speeds4 = tmp_speeds4;
tmp_speeds4 = speed_tmp;
speed_tmp = speeds5;
speeds5 = tmp_speeds5;
tmp_speeds5 = speed_tmp;
speed_tmp = speeds6;
speeds6 = tmp_speeds6;
tmp_speeds6 = speed_tmp;
speed_tmp = speeds7;
speeds7 = tmp_speeds7;
tmp_speeds7 = speed_tmp;
speed_tmp = speeds8;
speeds8 = tmp_speeds8;
tmp_speeds8 = speed_tmp;
}
//end timer
hipDeviceSynchronize();
gettimeofday(&timstr, NULL);
toc = timstr.tv_sec * 1e6 + timstr.tv_usec;
printf("After warmup for %d iterations, ", WARMUPS);
printf("average kernel execution time over %d iterations:\t\t\t%.6lf (us)\n",
MaxIters - WARMUPS, (toc - tic) / (MaxIters - WARMUPS));
hipMemcpy(tot_up, partial_sum, sizeof(float)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters, hipMemcpyDeviceToHost);
hipMemcpy(tot_cellsp, partial_sum2, sizeof(int)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS0, speeds0, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS1, speeds1, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS2, speeds2, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS3, speeds3, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS4, speeds4, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS5, speeds5, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS6, speeds6, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS7, speeds7, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipMemcpy(speedsHostS8, speeds8, sizeof(float)*Ny*Nx, hipMemcpyDeviceToHost);
hipFree(speeds0);
hipFree(speeds1);
hipFree(speeds2);
hipFree(speeds3);
hipFree(speeds4);
hipFree(speeds5);
hipFree(speeds6);
hipFree(speeds7);
hipFree(speeds8);
hipFree(tmp_speeds0);
hipFree(tmp_speeds1);
hipFree(tmp_speeds2);
hipFree(tmp_speeds3);
hipFree(tmp_speeds4);
hipFree(tmp_speeds5);
hipFree(tmp_speeds6);
hipFree(tmp_speeds7);
hipFree(tmp_speeds8);
hipFree(obstacles);
hipFree(partial_sum2);
hipFree(partial_sum);
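  // finish the reduction on the host: sum the per-workgroup partial results to
  // obtain the average velocity for every timestep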
float tot_u = 0;
int tot_cells = 0;
for (int tt = 0; tt < params.maxIters; tt++){
tot_u = 0;
tot_cells = 0;
for(int i = 0; i < params.nx/LOCALSIZEX*params.ny/LOCALSIZEY; i++){
tot_u += tot_up[i+tt*params.nx/LOCALSIZEX*params.ny/LOCALSIZEY];
tot_cells += tot_cellsp[i+tt*params.nx/LOCALSIZEX*params.ny/LOCALSIZEY];
//printf("%d %f %d\n", i, tot_u, tot_cells);
}
av_vels[tt] = tot_u/tot_cells;
}
// put answers back into cells
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
cells[ii + jj*params.nx].speeds[0] = speedsHostS0[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[1] = speedsHostS1[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[2] = speedsHostS2[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[3] = speedsHostS3[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[4] = speedsHostS4[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[5] = speedsHostS5[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[6] = speedsHostS6[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[7] = speedsHostS7[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[8] = speedsHostS8[ii + jj*params.nx];
}
}
/* write final values and free memory */
printf("==done==\n");
printf("Reynolds number:\t\t%.12E\n", calc_reynolds(params, cells, obstaclesHost));
write_values(params, cells, obstaclesHost, av_vels);
finalise(cells, tmp_cells, obstaclesHost, av_vels);
free(speedsHostS0);
free(speedsHostS1);
free(speedsHostS2);
free(speedsHostS3);
free(speedsHostS4);
free(speedsHostS5);
free(speedsHostS6);
free(speedsHostS7);
free(speedsHostS8);
free(tot_up);
free(tot_cellsp);
return EXIT_SUCCESS;
}
float av_velocity(const t_param params, t_speed* cells, int* obstacles)
{
int tot_cells = 0; /* no. of cells used in calculation */
float tot_u; /* accumulated magnitudes of velocity for each cell */
/* initialise */
tot_u = 0.f;
/* loop over all non-blocked cells */
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
/* ignore occupied cells */
if (!obstacles[ii + jj*params.nx])
{
/* local density total */
float local_density = 0.f;
for (int kk = 0; kk < NSPEEDS; kk++)
{
local_density += cells[ii + jj*params.nx].speeds[kk];
}
/* x-component of velocity */
float u_x = (cells[ii + jj*params.nx].speeds[1]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[8]
- (cells[ii + jj*params.nx].speeds[3]
+ cells[ii + jj*params.nx].speeds[6]
+ cells[ii + jj*params.nx].speeds[7]))
/ local_density;
/* compute y velocity component */
float u_y = (cells[ii + jj*params.nx].speeds[2]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[6]
- (cells[ii + jj*params.nx].speeds[4]
+ cells[ii + jj*params.nx].speeds[7]
+ cells[ii + jj*params.nx].speeds[8]))
/ local_density;
/* accumulate the norm of x- and y- velocity components */
tot_u += sqrtf((u_x * u_x) + (u_y * u_y));
/* increase counter of inspected cells */
++tot_cells;
}
}
}
return tot_u / (float)tot_cells;
}
int initialise(const char* paramfile, const char* obstaclefile,
t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr){
char message[1024]; /* message buffer */
FILE* fp; /* file pointer */
int xx, yy; /* generic array indices */
int blocked; /* indicates whether a cell is blocked by an obstacle */
int retval; /* to hold return value for checking */
/* open the parameter file */
fp = fopen(paramfile, "r");
if (fp == NULL)
{
sprintf(message, "could not open input parameter file: %s", paramfile);
die(message, __LINE__, __FILE__);
}
/* read in the parameter values */
retval = fscanf(fp, "%d\n", &(params->nx));
if (retval != 1) die("could not read param file: nx", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->ny));
if (retval != 1) die("could not read param file: ny", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->maxIters));
if (retval != 1) die("could not read param file: maxIters", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->reynolds_dim));
if (retval != 1) die("could not read param file: reynolds_dim", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->density));
if (retval != 1) die("could not read param file: density", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->accel));
if (retval != 1) die("could not read param file: accel", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->omega));
if (retval != 1) die("could not read param file: omega", __LINE__, __FILE__);
/* and close up the file */
fclose(fp);
/*
** allocate memory.
**
** remember C is pass-by-value, so we need to
** pass pointers into the initialise function.
**
** nb we are allocating a 1D array, so that the
** memory will be contiguous. We still want to
** index this memory as if it were a (row major
** ordered) 2D array, however. We will perform
** some arithmetic using the row and column
** coordinates, inside the square brackets, when
** we want to access elements of this array.
**
** note also that we are using a structure to
** hold an array of 'speeds'. We will allocate
** a 1D array of these structs.
*/
/* main grid */
*cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx));
if (*cells_ptr == NULL) die("cannot allocate memory for cells", __LINE__, __FILE__);
/* 'helper' grid, used as scratch space */
*tmp_cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx));
if (*tmp_cells_ptr == NULL) die("cannot allocate memory for tmp_cells", __LINE__, __FILE__);
/* the map of obstacles */
*obstacles_ptr = (int*) malloc (sizeof(int) * params->ny * params->nx);
if (*obstacles_ptr == NULL) die("cannot allocate column memory for obstacles", __LINE__, __FILE__);
/* initialise densities */
float w0 = params->density * 4.f / 9.f;
float w1 = params->density / 9.f;
float w2 = params->density / 36.f;
for (int jj = 0; jj < params->ny; jj++)
{
for (int ii = 0; ii < params->nx; ii++)
{
/* centre */
(*cells_ptr)[ii + jj*params->nx].speeds[0] = w0;
/* axis directions */
(*cells_ptr)[ii + jj*params->nx].speeds[1] = w1;
(*cells_ptr)[ii + jj*params->nx].speeds[2] = w1;
(*cells_ptr)[ii + jj*params->nx].speeds[3] = w1;
(*cells_ptr)[ii + jj*params->nx].speeds[4] = w1;
/* diagonals */
(*cells_ptr)[ii + jj*params->nx].speeds[5] = w2;
(*cells_ptr)[ii + jj*params->nx].speeds[6] = w2;
(*cells_ptr)[ii + jj*params->nx].speeds[7] = w2;
(*cells_ptr)[ii + jj*params->nx].speeds[8] = w2;
}
}
/* first set all cells in obstacle array to zero */
for (int jj = 0; jj < params->ny; jj++)
{
for (int ii = 0; ii < params->nx; ii++)
{
(*obstacles_ptr)[ii + jj*params->nx] = 0;
}
}
/* open the obstacle data file */
fp = fopen(obstaclefile, "r");
if (fp == NULL)
{
sprintf(message, "could not open input obstacles file: %s", obstaclefile);
die(message, __LINE__, __FILE__);
}
/* read-in the blocked cells list */
while ((retval = fscanf(fp, "%d %d %d\n", &xx, &yy, &blocked)) != EOF)
{
/* some checks */
if (retval != 3) die("expected 3 values per line in obstacle file", __LINE__, __FILE__);
if (xx < 0 || xx > params->nx - 1) die("obstacle x-coord out of range", __LINE__, __FILE__);
if (yy < 0 || yy > params->ny - 1) die("obstacle y-coord out of range", __LINE__, __FILE__);
if (blocked != 1) die("obstacle blocked value should be 1", __LINE__, __FILE__);
/* assign to array */
(*obstacles_ptr)[xx + yy*params->nx] = blocked;
}
/* and close the file */
fclose(fp);
/*
** allocate space to hold a record of the average velocities computed
** at each timestep
*/
*av_vels_ptr = (float*)malloc(sizeof(float) * params->maxIters);
return EXIT_SUCCESS;
}
int finalise(t_speed* cells_ptr, t_speed* tmp_cells_ptr,
int* obstacles_ptr, float* av_vels_ptr)
{
/*
** free up allocated memory
*/
free(cells_ptr);
free(tmp_cells_ptr);
free(obstacles_ptr);
free(av_vels_ptr);
return EXIT_SUCCESS;
}
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles)
{
const float viscosity = 1.f / 6.f * (2.f / params.omega - 1.f);
return av_velocity(params, cells, obstacles) * params.reynolds_dim / viscosity;
}
float total_density(const t_param params, t_speed* cells)
{
float total = 0.f; /* accumulator */
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
for (int kk = 0; kk < NSPEEDS; kk++)
{
total += cells[ii + jj*params.nx].speeds[kk];
}
}
}
return total;
}
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels)
{
FILE* fp; /* file pointer */
const float c_sq = 1.f / 3.f; /* sq. of speed of sound */
float local_density; /* per grid cell sum of densities */
float pressure; /* fluid pressure in grid cell */
float u_x; /* x-component of velocity in grid cell */
float u_y; /* y-component of velocity in grid cell */
float u; /* norm--root of summed squares--of u_x and u_y */
fp = fopen(FINALSTATEFILE, "w");
if (fp == NULL)
{
die("could not open file output file", __LINE__, __FILE__);
}
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
/* an occupied cell */
if (obstacles[ii + jj*params.nx])
{
u_x = u_y = u = 0.f;
pressure = params.density * c_sq;
}
/* no obstacle */
else
{
local_density = 0.f;
for (int kk = 0; kk < NSPEEDS; kk++)
{
local_density += cells[ii + jj*params.nx].speeds[kk];
}
/* compute x velocity component */
u_x = (cells[ii + jj*params.nx].speeds[1]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[8]
- (cells[ii + jj*params.nx].speeds[3]
+ cells[ii + jj*params.nx].speeds[6]
+ cells[ii + jj*params.nx].speeds[7]))
/ local_density;
/* compute y velocity component */
u_y = (cells[ii + jj*params.nx].speeds[2]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[6]
- (cells[ii + jj*params.nx].speeds[4]
+ cells[ii + jj*params.nx].speeds[7]
+ cells[ii + jj*params.nx].speeds[8]))
/ local_density;
/* compute norm of velocity */
u = sqrtf((u_x * u_x) + (u_y * u_y));
/* compute pressure */
pressure = local_density * c_sq;
}
/* write to file */
      fprintf(fp, "%d %d %.12E %.12E %.12E %.12E %d\n", ii, jj, u_x, u_y, u, pressure, obstacles[ii + jj*params.nx]);
}
}
fclose(fp);
fp = fopen(AVVELSFILE, "w");
if (fp == NULL)
{
die("could not open file output file", __LINE__, __FILE__);
}
for (int ii = 0; ii < params.maxIters; ii++)
{
fprintf(fp, "%d:\t%.12E\n", ii, av_vels[ii]);
}
fclose(fp);
return EXIT_SUCCESS;
}
void die(const char* message, const int line, const char* file)
{
fprintf(stderr, "Error at line %d of file %s:\n", line, file);
fprintf(stderr, "%s\n", message);
fflush(stderr);
exit(EXIT_FAILURE);
}
void usage(const char* exe)
{
fprintf(stderr, "Usage: %s <paramfile> <obstaclefile>\n", exe);
exit(EXIT_FAILURE);
}
| 476d82f48040603fe8bf8ea6ad8d939b97687238.cu | /*
** Code to implement a d2q9-bgk lattice boltzmann scheme.
** 'd2' indicates a 2-dimensional grid, and
** 'q9' indicates 9 velocities per grid cell.
** 'bgk' refers to the Bhatnagar-Gross-Krook collision step.
**
** The 'speeds' in each cell are numbered as follows:
**
** 6 2 5
** \|/
** 3-0-1
** /|\
** 7 4 8
**
** A 2D grid:
**
** cols
** --- --- ---
** | D | E | F |
** rows --- --- ---
** | A | B | C |
** --- --- ---
**
** 'unwrapped' in row major order to give a 1D array:
**
** --- --- --- --- --- ---
** | A | B | C | D | E | F |
** --- --- --- --- --- ---
**
** Grid indices are:
**
** ny
** ^ cols(ii)
** | ----- ----- -----
** | | ... | ... | etc |
** | ----- ----- -----
** rows(jj) | | 1,0 | 1,1 | 1,2 |
** | ----- ----- -----
** | | 0,0 | 0,1 | 0,2 |
** | ----- ----- -----
** ----------------------> nx
**
** Note the names of the input parameter and obstacle files
** are passed on the command line, e.g.:
**
** ./d2q9-bgk input.params obstacles.dat
**
** Be sure to adjust the grid dimensions in the parameter file
** if you choose a different obstacle file.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <sys/time.h>
#include <cuda.h>
#define WARMUPS 1000
#define NSPEEDS 9
#define LOCALSIZEX 128
#define LOCALSIZEY 1
/* dump output files for verification */
#define FINALSTATEFILE "final_state.dat"
#define AVVELSFILE "av_vels.dat"
/* struct to hold the parameter values */
typedef struct
{
int nx; /* no. of cells in x-direction */
int ny; /* no. of cells in y-direction */
int maxIters; /* no. of iterations */
int reynolds_dim; /* dimension for Reynolds number */
float density; /* density per link */
float accel; /* density redistribution */
float omega; /* relaxation parameter */
} t_param;
/* struct to hold the 'speed' values */
typedef struct
{
float speeds[NSPEEDS];
} t_speed;
/*
** function prototypes
*/
/* load params, allocate memory, load obstacles & initialise fluid particle densities */
int initialise(const char* paramfile, const char* obstaclefile,
t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr);
/*
** The main calculation methods.
** timestep calls, in order, the functions:
** accelerate_flow(), propagate(), rebound() & collision()
*/
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels);
/* finalise, including freeing up allocated memory */
int finalise(t_speed* cells_ptr, t_speed* tmp_cells_ptr,
int* obstacles_ptr, float* av_vels_ptr);
/* Sum all the densities in the grid.
** The total should remain constant from one timestep to the next. */
float total_density(const t_param params, t_speed* cells);
/* compute average velocity */
float av_velocity(const t_param params, t_speed* cells, int* obstacles);
/* calculate Reynolds number */
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles);
/* utility functions */
void die(const char* message, const int line, const char* file);
void usage(const char* exe);
__device__ bool
isGreater(const float x, const float y)
{
return x > y ? 1 : 0;
}
__global__ void d2q9_bgk(
const float* __restrict__ Speed0A,
const float* __restrict__ Speed1A,
const float* __restrict__ Speed2A,
const float* __restrict__ Speed3A,
const float* __restrict__ Speed4A,
const float* __restrict__ Speed5A,
const float* __restrict__ Speed6A,
const float* __restrict__ Speed7A,
const float* __restrict__ Speed8A,
float* __restrict__ Tmp0A,
float* __restrict__ Tmp1A,
float* __restrict__ Tmp2A,
float* __restrict__ Tmp3A,
float* __restrict__ Tmp4A,
float* __restrict__ Tmp5A,
float* __restrict__ Tmp6A,
float* __restrict__ Tmp7A,
float* __restrict__ Tmp8A,
const int* __restrict__ ObstaclesA,
float* __restrict__ Partial_Sum,
int* __restrict__ Partial_Sum2,
const float densityaccel,
const float omega,
const int nx,
const int ny,
const int tt)
{
//setup local memory
__shared__ int local_sum2[LOCALSIZEX*LOCALSIZEY];
__shared__ float local_sum[LOCALSIZEX*LOCALSIZEY];
/* get column and row indices */
const int ii = blockIdx.x * blockDim.x + threadIdx.x;
const int jj = blockIdx.y * blockDim.y + threadIdx.y;
const float c_sq_inv = 3.f;
const float c_sq = 1.f/c_sq_inv; /* square of speed of sound */
const float temp1 = 4.5f;
const float w1 = 1.f/9.f;
const float w0 = 4.f * w1; /* weighting factor */
const float w2 = 1.f/36.f; /* weighting factor */
const float w11 = densityaccel * w1;
const float w21 = densityaccel * w2;
/* determine indices of axis-direction neighbours
** respecting periodic boundary conditions (wrap around) */
const int y_n = (jj + 1) % ny;
const int x_e = (ii + 1) % nx;
const int y_s = (jj == 0) ? (jj + ny - 1) : (jj - 1);
const int x_w = (ii == 0) ? (ii + nx - 1) : (ii - 1);
/* propagate densities from neighbouring cells, following
** appropriate directions of travel and writing into
** scratch space grid */
float tmp_s0 = Speed0A[ii + jj*nx];
float tmp_s1 = (jj == ny-2 && (!ObstaclesA[x_w + jj*nx] && isGreater((Speed3A[x_w + jj*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + jj*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + jj*nx] - w21) , 0.f))) ? Speed1A[x_w + jj*nx]+w11 : Speed1A[x_w + jj*nx];
float tmp_s2 = Speed2A[ii + y_s*nx];
float tmp_s3 = (jj == ny-2 && (!ObstaclesA[x_e + jj*nx] && isGreater((Speed3A[x_e + jj*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + jj*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + jj*nx] - w21) , 0.f))) ? Speed3A[x_e + jj*nx]-w11 : Speed3A[x_e + jj*nx];
float tmp_s4 = Speed4A[ii + y_n*nx];
float tmp_s5 = (y_s == ny-2 && (!ObstaclesA[x_w + y_s*nx] && isGreater((Speed3A[x_w + y_s*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + y_s*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + y_s*nx] - w21) , 0.f))) ? Speed5A[x_w + y_s*nx]+w21 : Speed5A[x_w + y_s*nx];
float tmp_s6 = (y_s == ny-2 && (!ObstaclesA[x_e + y_s*nx] && isGreater((Speed3A[x_e + y_s*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + y_s*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + y_s*nx] - w21) , 0.f))) ? Speed6A[x_e + y_s*nx]-w21 : Speed6A[x_e + y_s*nx];
float tmp_s7 = (y_n == ny-2 && (!ObstaclesA[x_e + y_n*nx] && isGreater((Speed3A[x_e + y_n*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + y_n*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + y_n*nx] - w21) , 0.f))) ? Speed7A[x_e + y_n*nx]-w21 : Speed7A[x_e + y_n*nx];
float tmp_s8 = (y_n == ny-2 && (!ObstaclesA[x_w + y_n*nx] && isGreater((Speed3A[x_w + y_n*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + y_n*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + y_n*nx] - w21) , 0.f))) ? Speed8A[x_w + y_n*nx]+w21 : Speed8A[x_w + y_n*nx];
/* compute local density total */
float local_density = tmp_s0 + tmp_s1 + tmp_s2 + tmp_s3 + tmp_s4 + tmp_s5 + tmp_s6 + tmp_s7 + tmp_s8;
const float local_density_recip = 1.f/(local_density);
/* compute x velocity component */
float u_x = (tmp_s1
+ tmp_s5
+ tmp_s8
- tmp_s3
- tmp_s6
- tmp_s7)
* local_density_recip;
/* compute y velocity component */
float u_y = (tmp_s2
+ tmp_s5
+ tmp_s6
- tmp_s4
- tmp_s8
- tmp_s7)
* local_density_recip;
/* velocity squared */
const float temp2 = - (u_x * u_x + u_y * u_y)/(2.f * c_sq);
/* equilibrium densities */
float d_equ[NSPEEDS];
/* zero velocity density: weight w0 */
d_equ[0] = w0 * local_density
* (1.f + temp2);
/* axis speeds: weight w1 */
d_equ[1] = w1 * local_density * (1.f + u_x * c_sq_inv
+ (u_x * u_x) * temp1
+ temp2);
d_equ[2] = w1 * local_density * (1.f + u_y * c_sq_inv
+ (u_y * u_y) * temp1
+ temp2);
d_equ[3] = w1 * local_density * (1.f - u_x * c_sq_inv
+ (u_x * u_x) * temp1
+ temp2);
d_equ[4] = w1 * local_density * (1.f - u_y * c_sq_inv
+ (u_y * u_y) * temp1
+ temp2);
/* diagonal speeds: weight w2 */
d_equ[5] = w2 * local_density * (1.f + (u_x + u_y) * c_sq_inv
+ ((u_x + u_y) * (u_x + u_y)) * temp1
+ temp2);
d_equ[6] = w2 * local_density * (1.f + (-u_x + u_y) * c_sq_inv
+ ((-u_x + u_y) * (-u_x + u_y)) * temp1
+ temp2);
d_equ[7] = w2 * local_density * (1.f + (-u_x - u_y) * c_sq_inv
+ ((-u_x - u_y) * (-u_x - u_y)) * temp1
+ temp2);
d_equ[8] = w2 * local_density * (1.f + (u_x - u_y) * c_sq_inv
+ ((u_x - u_y) * (u_x - u_y)) * temp1
+ temp2);
float tmp;
int expression = ObstaclesA[ii + jj*nx];
tmp_s0 = expression ? tmp_s0 : (tmp_s0 + omega * (d_equ[0] - tmp_s0));
tmp = tmp_s1;
tmp_s1 = expression ? tmp_s3 : (tmp_s1 + omega * (d_equ[1] - tmp_s1));
tmp_s3 = expression ? tmp : (tmp_s3 + omega * (d_equ[3] - tmp_s3));
tmp = tmp_s2;
tmp_s2 = expression ? tmp_s4 : (tmp_s2 + omega * (d_equ[2] - tmp_s2));
tmp_s4 = expression ? tmp : (tmp_s4 + omega * (d_equ[4] - tmp_s4));
tmp = tmp_s5;
tmp_s5 = expression ? tmp_s7 : (tmp_s5 + omega * (d_equ[5] - tmp_s5));
tmp_s7 = expression ? tmp : (tmp_s7 + omega * (d_equ[7] - tmp_s7));
tmp = tmp_s6;
tmp_s6 = expression ? tmp_s8 : (tmp_s6 + omega * (d_equ[6] - tmp_s6));
tmp_s8 = expression ? tmp : (tmp_s8 + omega * (d_equ[8] - tmp_s8));
/* local density total */
local_density = 1.f/(tmp_s0 + tmp_s1 + tmp_s2 + tmp_s3 + tmp_s4 + tmp_s5 + tmp_s6 + tmp_s7 + tmp_s8);
/* x-component of velocity */
u_x = (tmp_s1
+ tmp_s5
+ tmp_s8
- tmp_s3
- tmp_s6
- tmp_s7)
* local_density;
/* compute y velocity component */
u_y = (tmp_s2
+ tmp_s5
+ tmp_s6
- tmp_s4
- tmp_s7
- tmp_s8)
* local_density;
Tmp0A[ii + jj*nx] = tmp_s0;
Tmp1A[ii + jj*nx] = tmp_s1;
Tmp2A[ii + jj*nx] = tmp_s2;
Tmp3A[ii + jj*nx] = tmp_s3;
Tmp4A[ii + jj*nx] = tmp_s4;
Tmp5A[ii + jj*nx] = tmp_s5;
Tmp6A[ii + jj*nx] = tmp_s6;
Tmp7A[ii + jj*nx] = tmp_s7;
Tmp8A[ii + jj*nx] = tmp_s8;
int local_idi = threadIdx.x;
int local_idj = threadIdx.y;
int local_sizei = blockDim.x;
int local_sizej = blockDim.y;
/* accumulate the norm of x- and y- velocity components */
local_sum[local_idi + local_idj*local_sizei] = (ObstaclesA[ii + jj*nx]) ? 0 : hypotf(u_x,u_y);
/* increase counter of inspected cells */
local_sum2[local_idi + local_idj*local_sizei] = (ObstaclesA[ii + jj*nx]) ? 0 : 1 ;
__syncthreads();
int group_id = blockIdx.x;
int group_id2 = blockIdx.y;
int group_size = gridDim.x;
int group_size2 = gridDim.y;
if(local_idi == 0 && local_idj == 0){
float sum = 0.0f;
int sum2 = 0;
for(int i = 0; i<local_sizei*local_sizej; i++){
sum += local_sum[i];
sum2 += local_sum2[i];
}
Partial_Sum[group_id+group_id2*group_size+tt*group_size*group_size2] = sum;
Partial_Sum2[group_id+group_id2*group_size+tt*group_size*group_size2] = sum2;
}
}
int main(int argc, char* argv[])
{
char* paramfile = NULL; /* input parameter file */
char* obstaclefile = NULL; /* input obstacle file */
t_param params; /* struct to hold parameter values */
t_speed* cells = NULL; /* grid containing fluid densities */
t_speed* tmp_cells = NULL; /* scratch space */
int* obstaclesHost = NULL;/* grid indicating which cells are blocked */
float* av_vels = NULL; /* a record of the av. velocity computed for each timestep */
struct timeval timstr; /* structure to hold elapsed time */
double tic, toc; /* floating point numbers to calculate elapsed wallclock time */
/* parse the command line */
if (argc != 3)
{
usage(argv[0]);
}
else
{
paramfile = argv[1];
obstaclefile = argv[2];
}
/* initialise our data structures and load values from file */
initialise(paramfile, obstaclefile, ¶ms, &cells,
&tmp_cells, &obstaclesHost, &av_vels);
// declare host arrays
int Ny = params.ny;
int Nx = params.nx;
int MaxIters = params.maxIters;
float *speedsHostS0 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS1 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS2 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS3 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS4 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS5 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS6 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS7 = (float*) malloc (sizeof(float)*Ny*Nx);
float *speedsHostS8 = (float*) malloc (sizeof(float)*Ny*Nx);
float *tot_up = (float*) malloc (sizeof(float) * (Ny/LOCALSIZEY) * (Nx/LOCALSIZEX) * MaxIters);
int *tot_cellsp = (int*) malloc (sizeof(int) * (Ny/LOCALSIZEY) * (Nx/LOCALSIZEX) * MaxIters);
// Init arrays
/* loop over _all_ cells */
for (int jj = 0; jj < Ny; jj++)
{
for (int ii = 0; ii < Nx; ii++)
{
speedsHostS0[ii + jj*Nx] = cells[ii + jj*Nx].speeds[0];
speedsHostS1[ii + jj*Nx] = cells[ii + jj*Nx].speeds[1];
speedsHostS2[ii + jj*Nx] = cells[ii + jj*Nx].speeds[2];
speedsHostS3[ii + jj*Nx] = cells[ii + jj*Nx].speeds[3];
speedsHostS4[ii + jj*Nx] = cells[ii + jj*Nx].speeds[4];
speedsHostS5[ii + jj*Nx] = cells[ii + jj*Nx].speeds[5];
speedsHostS6[ii + jj*Nx] = cells[ii + jj*Nx].speeds[6];
speedsHostS7[ii + jj*Nx] = cells[ii + jj*Nx].speeds[7];
speedsHostS8[ii + jj*Nx] = cells[ii + jj*Nx].speeds[8];
}
}
// Creating buffers which are bound to host arrays
float *speeds0, *speeds1, *speeds2, *speeds3, *speeds4,
*speeds5, *speeds6, *speeds7, *speeds8;
float *tmp_speeds0, *tmp_speeds1, *tmp_speeds2, *tmp_speeds3, *tmp_speeds4,
*tmp_speeds5, *tmp_speeds6, *tmp_speeds7, *tmp_speeds8;
cudaMalloc((void**)&speeds0, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds0, speedsHostS0, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds1, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds1, speedsHostS1, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds2, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds2, speedsHostS2, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds3, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds3, speedsHostS3, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds4, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds4, speedsHostS4, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds5, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds5, speedsHostS5, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds6, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds6, speedsHostS6, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds7, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds7, speedsHostS7, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&speeds8, sizeof(float)*Ny*Nx);
cudaMemcpy(speeds8, speedsHostS8, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
cudaMalloc((void**)&tmp_speeds0, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds1, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds2, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds3, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds4, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds5, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds6, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds7, sizeof(float)*Ny*Nx);
cudaMalloc((void**)&tmp_speeds8, sizeof(float)*Ny*Nx);
int *obstacles, *partial_sum2;
float *partial_sum;
cudaMalloc((void**)&obstacles, sizeof(int)*Ny*Nx);
cudaMalloc((void**)&partial_sum, sizeof(float)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters);
cudaMalloc((void**)&partial_sum2, sizeof(int)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters);
cudaMemcpy(obstacles, obstaclesHost, sizeof(int)*Ny*Nx, cudaMemcpyHostToDevice);
// parameters for kernel
float omega = params.omega;
float densityaccel = params.density*params.accel;
dim3 grids(Nx/LOCALSIZEX, Ny/LOCALSIZEY);
dim3 threads(LOCALSIZEX, LOCALSIZEY);
for (int tt = 0; tt < MaxIters; tt++){
if (tt == WARMUPS - 1) {
//start timer after warmup
cudaDeviceSynchronize();
gettimeofday(&timstr, NULL);
tic = timstr.tv_sec * 1e6 + timstr.tv_usec;
}
d2q9_bgk<<<grids, threads>>>(
speeds0,
speeds1,
speeds2,
speeds3,
speeds4,
speeds5,
speeds6,
speeds7,
speeds8,
tmp_speeds0,
tmp_speeds1,
tmp_speeds2,
tmp_speeds3,
tmp_speeds4,
tmp_speeds5,
tmp_speeds6,
tmp_speeds7,
tmp_speeds8,
obstacles,
partial_sum,
partial_sum2,
densityaccel,
omega,
Nx,
Ny,
tt );
// swap the buffers
float* speed_tmp = speeds0;
speeds0 = tmp_speeds0;
tmp_speeds0 = speed_tmp;
speed_tmp = speeds1;
speeds1 = tmp_speeds1;
tmp_speeds1 = speed_tmp;
speed_tmp = speeds2;
speeds2 = tmp_speeds2;
tmp_speeds2 = speed_tmp;
speed_tmp = speeds3;
speeds3 = tmp_speeds3;
tmp_speeds3 = speed_tmp;
speed_tmp = speeds4;
speeds4 = tmp_speeds4;
tmp_speeds4 = speed_tmp;
speed_tmp = speeds5;
speeds5 = tmp_speeds5;
tmp_speeds5 = speed_tmp;
speed_tmp = speeds6;
speeds6 = tmp_speeds6;
tmp_speeds6 = speed_tmp;
speed_tmp = speeds7;
speeds7 = tmp_speeds7;
tmp_speeds7 = speed_tmp;
speed_tmp = speeds8;
speeds8 = tmp_speeds8;
tmp_speeds8 = speed_tmp;
}
//end timer
cudaDeviceSynchronize();
gettimeofday(&timstr, NULL);
toc = timstr.tv_sec * 1e6 + timstr.tv_usec;
printf("After warmup for %d iterations, ", WARMUPS);
printf("average kernel execution time over %d iterations:\t\t\t%.6lf (us)\n",
MaxIters - WARMUPS, (toc - tic) / (MaxIters - WARMUPS));
cudaMemcpy(tot_up, partial_sum, sizeof(float)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters, cudaMemcpyDeviceToHost);
cudaMemcpy(tot_cellsp, partial_sum2, sizeof(int)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS0, speeds0, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS1, speeds1, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS2, speeds2, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS3, speeds3, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS4, speeds4, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS5, speeds5, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS6, speeds6, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS7, speeds7, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaMemcpy(speedsHostS8, speeds8, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
cudaFree(speeds0);
cudaFree(speeds1);
cudaFree(speeds2);
cudaFree(speeds3);
cudaFree(speeds4);
cudaFree(speeds5);
cudaFree(speeds6);
cudaFree(speeds7);
cudaFree(speeds8);
cudaFree(tmp_speeds0);
cudaFree(tmp_speeds1);
cudaFree(tmp_speeds2);
cudaFree(tmp_speeds3);
cudaFree(tmp_speeds4);
cudaFree(tmp_speeds5);
cudaFree(tmp_speeds6);
cudaFree(tmp_speeds7);
cudaFree(tmp_speeds8);
cudaFree(obstacles);
cudaFree(partial_sum2);
cudaFree(partial_sum);
float tot_u = 0;
int tot_cells = 0;
for (int tt = 0; tt < params.maxIters; tt++){
tot_u = 0;
tot_cells = 0;
for(int i = 0; i < params.nx/LOCALSIZEX*params.ny/LOCALSIZEY; i++){
tot_u += tot_up[i+tt*params.nx/LOCALSIZEX*params.ny/LOCALSIZEY];
tot_cells += tot_cellsp[i+tt*params.nx/LOCALSIZEX*params.ny/LOCALSIZEY];
//printf("%d %f %d\n", i, tot_u, tot_cells);
}
av_vels[tt] = tot_u/tot_cells;
}
// put answers back into cells
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
cells[ii + jj*params.nx].speeds[0] = speedsHostS0[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[1] = speedsHostS1[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[2] = speedsHostS2[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[3] = speedsHostS3[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[4] = speedsHostS4[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[5] = speedsHostS5[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[6] = speedsHostS6[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[7] = speedsHostS7[ii + jj*params.nx];
cells[ii + jj*params.nx].speeds[8] = speedsHostS8[ii + jj*params.nx];
}
}
/* write final values and free memory */
printf("==done==\n");
printf("Reynolds number:\t\t%.12E\n", calc_reynolds(params, cells, obstaclesHost));
write_values(params, cells, obstaclesHost, av_vels);
finalise(cells, tmp_cells, obstaclesHost, av_vels);
free(speedsHostS0);
free(speedsHostS1);
free(speedsHostS2);
free(speedsHostS3);
free(speedsHostS4);
free(speedsHostS5);
free(speedsHostS6);
free(speedsHostS7);
free(speedsHostS8);
free(tot_up);
free(tot_cellsp);
return EXIT_SUCCESS;
}
float av_velocity(const t_param params, t_speed* cells, int* obstacles)
{
int tot_cells = 0; /* no. of cells used in calculation */
float tot_u; /* accumulated magnitudes of velocity for each cell */
/* initialise */
tot_u = 0.f;
/* loop over all non-blocked cells */
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
/* ignore occupied cells */
if (!obstacles[ii + jj*params.nx])
{
/* local density total */
float local_density = 0.f;
for (int kk = 0; kk < NSPEEDS; kk++)
{
local_density += cells[ii + jj*params.nx].speeds[kk];
}
/* x-component of velocity */
float u_x = (cells[ii + jj*params.nx].speeds[1]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[8]
- (cells[ii + jj*params.nx].speeds[3]
+ cells[ii + jj*params.nx].speeds[6]
+ cells[ii + jj*params.nx].speeds[7]))
/ local_density;
/* compute y velocity component */
float u_y = (cells[ii + jj*params.nx].speeds[2]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[6]
- (cells[ii + jj*params.nx].speeds[4]
+ cells[ii + jj*params.nx].speeds[7]
+ cells[ii + jj*params.nx].speeds[8]))
/ local_density;
/* accumulate the norm of x- and y- velocity components */
tot_u += sqrtf((u_x * u_x) + (u_y * u_y));
/* increase counter of inspected cells */
++tot_cells;
}
}
}
return tot_u / (float)tot_cells;
}
int initialise(const char* paramfile, const char* obstaclefile,
t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr){
char message[1024]; /* message buffer */
FILE* fp; /* file pointer */
int xx, yy; /* generic array indices */
int blocked; /* indicates whether a cell is blocked by an obstacle */
int retval; /* to hold return value for checking */
/* open the parameter file */
fp = fopen(paramfile, "r");
if (fp == NULL)
{
sprintf(message, "could not open input parameter file: %s", paramfile);
die(message, __LINE__, __FILE__);
}
/* read in the parameter values */
retval = fscanf(fp, "%d\n", &(params->nx));
if (retval != 1) die("could not read param file: nx", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->ny));
if (retval != 1) die("could not read param file: ny", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->maxIters));
if (retval != 1) die("could not read param file: maxIters", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->reynolds_dim));
if (retval != 1) die("could not read param file: reynolds_dim", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->density));
if (retval != 1) die("could not read param file: density", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->accel));
if (retval != 1) die("could not read param file: accel", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->omega));
if (retval != 1) die("could not read param file: omega", __LINE__, __FILE__);
/* and close up the file */
fclose(fp);
/*
** allocate memory.
**
** remember C is pass-by-value, so we need to
** pass pointers into the initialise function.
**
** nb we are allocating a 1D array, so that the
** memory will be contiguous. We still want to
** index this memory as if it were a (row major
** ordered) 2D array, however. We will perform
** some arithmetic using the row and column
** coordinates, inside the square brackets, when
** we want to access elements of this array.
**
** note also that we are using a structure to
** hold an array of 'speeds'. We will allocate
** a 1D array of these structs.
*/
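  /* Example (not from the original code): with nx columns, the cell in
  ** column ii and row jj of the conceptual 2D grid is stored at 1D index
  ** ii + jj*nx, so for nx = 100 the cell (ii = 3, jj = 2) is element 203. */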
/* main grid */
*cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx));
if (*cells_ptr == NULL) die("cannot allocate memory for cells", __LINE__, __FILE__);
/* 'helper' grid, used as scratch space */
*tmp_cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx));
if (*tmp_cells_ptr == NULL) die("cannot allocate memory for tmp_cells", __LINE__, __FILE__);
/* the map of obstacles */
*obstacles_ptr = (int*) malloc (sizeof(int) * params->ny * params->nx);
if (*obstacles_ptr == NULL) die("cannot allocate column memory for obstacles", __LINE__, __FILE__);
/* initialise densities */
float w0 = params->density * 4.f / 9.f;
float w1 = params->density / 9.f;
float w2 = params->density / 36.f;
for (int jj = 0; jj < params->ny; jj++)
{
for (int ii = 0; ii < params->nx; ii++)
{
/* centre */
(*cells_ptr)[ii + jj*params->nx].speeds[0] = w0;
/* axis directions */
(*cells_ptr)[ii + jj*params->nx].speeds[1] = w1;
(*cells_ptr)[ii + jj*params->nx].speeds[2] = w1;
(*cells_ptr)[ii + jj*params->nx].speeds[3] = w1;
(*cells_ptr)[ii + jj*params->nx].speeds[4] = w1;
/* diagonals */
(*cells_ptr)[ii + jj*params->nx].speeds[5] = w2;
(*cells_ptr)[ii + jj*params->nx].speeds[6] = w2;
(*cells_ptr)[ii + jj*params->nx].speeds[7] = w2;
(*cells_ptr)[ii + jj*params->nx].speeds[8] = w2;
}
}
/* first set all cells in obstacle array to zero */
for (int jj = 0; jj < params->ny; jj++)
{
for (int ii = 0; ii < params->nx; ii++)
{
(*obstacles_ptr)[ii + jj*params->nx] = 0;
}
}
/* open the obstacle data file */
fp = fopen(obstaclefile, "r");
if (fp == NULL)
{
sprintf(message, "could not open input obstacles file: %s", obstaclefile);
die(message, __LINE__, __FILE__);
}
/* read-in the blocked cells list */
while ((retval = fscanf(fp, "%d %d %d\n", &xx, &yy, &blocked)) != EOF)
{
/* some checks */
if (retval != 3) die("expected 3 values per line in obstacle file", __LINE__, __FILE__);
if (xx < 0 || xx > params->nx - 1) die("obstacle x-coord out of range", __LINE__, __FILE__);
if (yy < 0 || yy > params->ny - 1) die("obstacle y-coord out of range", __LINE__, __FILE__);
if (blocked != 1) die("obstacle blocked value should be 1", __LINE__, __FILE__);
/* assign to array */
(*obstacles_ptr)[xx + yy*params->nx] = blocked;
}
/* and close the file */
fclose(fp);
/*
  ** allocate space to hold a record of the average velocities computed
** at each timestep
*/
*av_vels_ptr = (float*)malloc(sizeof(float) * params->maxIters);
return EXIT_SUCCESS;
}
int finalise(t_speed* cells_ptr, t_speed* tmp_cells_ptr,
int* obstacles_ptr, float* av_vels_ptr)
{
/*
** free up allocated memory
*/
free(cells_ptr);
free(tmp_cells_ptr);
free(obstacles_ptr);
free(av_vels_ptr);
return EXIT_SUCCESS;
}
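/* calc_reynolds() below uses the lattice viscosity nu = (2/omega - 1) / 6,
** so the value returned is Re = <u> * reynolds_dim / nu, where <u> is the
** average velocity over all non-blocked cells. */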
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles)
{
const float viscosity = 1.f / 6.f * (2.f / params.omega - 1.f);
return av_velocity(params, cells, obstacles) * params.reynolds_dim / viscosity;
}
float total_density(const t_param params, t_speed* cells)
{
float total = 0.f; /* accumulator */
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
for (int kk = 0; kk < NSPEEDS; kk++)
{
total += cells[ii + jj*params.nx].speeds[kk];
}
}
}
return total;
}
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels)
{
FILE* fp; /* file pointer */
const float c_sq = 1.f / 3.f; /* sq. of speed of sound */
float local_density; /* per grid cell sum of densities */
float pressure; /* fluid pressure in grid cell */
float u_x; /* x-component of velocity in grid cell */
float u_y; /* y-component of velocity in grid cell */
float u; /* norm--root of summed squares--of u_x and u_y */
fp = fopen(FINALSTATEFILE, "w");
if (fp == NULL)
{
die("could not open file output file", __LINE__, __FILE__);
}
for (int jj = 0; jj < params.ny; jj++)
{
for (int ii = 0; ii < params.nx; ii++)
{
/* an occupied cell */
if (obstacles[ii + jj*params.nx])
{
u_x = u_y = u = 0.f;
pressure = params.density * c_sq;
}
/* no obstacle */
else
{
local_density = 0.f;
for (int kk = 0; kk < NSPEEDS; kk++)
{
local_density += cells[ii + jj*params.nx].speeds[kk];
}
/* compute x velocity component */
u_x = (cells[ii + jj*params.nx].speeds[1]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[8]
- (cells[ii + jj*params.nx].speeds[3]
+ cells[ii + jj*params.nx].speeds[6]
+ cells[ii + jj*params.nx].speeds[7]))
/ local_density;
/* compute y velocity component */
u_y = (cells[ii + jj*params.nx].speeds[2]
+ cells[ii + jj*params.nx].speeds[5]
+ cells[ii + jj*params.nx].speeds[6]
- (cells[ii + jj*params.nx].speeds[4]
+ cells[ii + jj*params.nx].speeds[7]
+ cells[ii + jj*params.nx].speeds[8]))
/ local_density;
/* compute norm of velocity */
u = sqrtf((u_x * u_x) + (u_y * u_y));
/* compute pressure */
pressure = local_density * c_sq;
}
/* write to file */
fprintf(fp, "%d %d %.12E %.12E %.12E %.12E %d\n", ii, jj, u_x, u_y, u, pressure, obstacles[ii * params.nx + jj]);
}
}
fclose(fp);
fp = fopen(AVVELSFILE, "w");
if (fp == NULL)
{
die("could not open file output file", __LINE__, __FILE__);
}
for (int ii = 0; ii < params.maxIters; ii++)
{
fprintf(fp, "%d:\t%.12E\n", ii, av_vels[ii]);
}
fclose(fp);
return EXIT_SUCCESS;
}
void die(const char* message, const int line, const char* file)
{
fprintf(stderr, "Error at line %d of file %s:\n", line, file);
fprintf(stderr, "%s\n", message);
fflush(stderr);
exit(EXIT_FAILURE);
}
void usage(const char* exe)
{
fprintf(stderr, "Usage: %s <paramfile> <obstaclefile>\n", exe);
exit(EXIT_FAILURE);
}
|
7cf29ba746190b830fca2c3f5b74ea771aa1f4ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgerbt.cu normal z -> d, Fri Jan 30 19:00:08 2015
@author Adrien REMY
*/
#include "common_magma.h"
#include "dgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    DPRBT_MVT computes B = U^T*B to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du DOUBLE_PRECISION array, dimension (n,2)
                The 2*n vector representing the random butterfly matrix U
@param[in,out]
db DOUBLE_PRECISION array, dimension (n)
The n vector db computed by DGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_dprbt_mtv_q(
magma_int_t n,
double *du, double *db,
magma_queue_t queue)
{
/*
*/
magma_int_t threads = block_length;
magma_int_t grid = n/(4*block_length) + ((n%(4*block_length))!=0);
hipLaunchKernelGGL(( magmablas_dapply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, du, n, db, 0);
hipLaunchKernelGGL(( magmablas_dapply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, du, n+n/2, db, n/2);
threads = block_length;
grid = n/(2*block_length) + ((n%(2*block_length))!=0);
hipLaunchKernelGGL(( magmablas_dapply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue , n, du, 0, db, 0);
}
/**
@see magmablas_dprbt_mtv_q
********************************************************************/
extern "C" void
magmablas_dprbt_mtv(
magma_int_t n,
double *du, double *db)
{
magmablas_dprbt_mtv_q(n, du, db, magma_stream);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    DPRBT_MV computes B = V*B to obtain the non-randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db DOUBLE_PRECISION array, dimension (n)
The n vector db computed by DGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv DOUBLE_PRECISION array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_dprbt_mv_q(
magma_int_t n,
double *dv, double *db,
magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t grid = n/(2*block_length) + ((n%(2*block_length))!=0);
hipLaunchKernelGGL(( magmablas_dapply_vector_kernel), dim3(grid), dim3(threads), 0, queue , n, dv, 0, db, 0);
threads = block_length;
grid = n/(4*block_length) + ((n%(4*block_length))!=0);
hipLaunchKernelGGL(( magmablas_dapply_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dv, n, db, 0);
hipLaunchKernelGGL(( magmablas_dapply_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dv, n+n/2, db, n/2);
}
/**
    @see magmablas_dprbt_mv_q
********************************************************************/
extern "C" void
magmablas_dprbt_mv(
magma_int_t n,
double *dv, double *db)
{
magmablas_dprbt_mv_q(n, dv, db, magma_stream);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    DPRBT randomizes a square general matrix using a partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA DOUBLE_PRECISION array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du DOUBLE_PRECISION array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv DOUBLE_PRECISION array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_dprbt_q(
magma_int_t n,
double *dA, magma_int_t ldda,
double *du, double *dv,
magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid(n/(4*block_height) + ((n%(4*block_height))!=0),
n/(4*block_width) + ((n%(4*block_width))!=0));
hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, 0, ldda, du, 0, dv, 0);
hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, ldda*n/2, ldda, du, 0, dv, n/2);
hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, n/2, ldda, du, n/2, dv, 0);
hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2(n/(2*block_height) + ((n%(2*block_height))!=0),
n/(2*block_width) + ((n%(2*block_width))!=0));
hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel), dim3(grid2), dim3(threads2), 0, queue , n, dA, 0, ldda, du, -ldda, dv, -ldda);
}
/**
@see magmablas_dprbt_q
********************************************************************/
extern "C" void
magmablas_dprbt(
magma_int_t n,
double *dA, magma_int_t ldda,
double *du, double *dv)
{
magmablas_dprbt_q(n, dA, ldda, du, dv, magma_stream);
}
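/*
    Typical call sequence for the routines above (a sketch inferred from the
    documentation in this file, not part of the original source):

        magmablas_dprbt    (n, dA, ldda, du, dv);  // A <- U^T * A * V
        magmablas_dprbt_mtv(n, du, db);            // b <- U^T * b
        // solve the randomized system with a non-pivoted solver
        // (e.g. DGESV_NOPIV_GPU), overwriting db with the solution
        magmablas_dprbt_mv (n, dv, db);            // recover x = V * db
*/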
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
daxpycp2_kernel(
int m, double *r, double *x,
const double *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_D_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
extern "C" void
magmablas_daxpycp2_q(
magma_int_t m, double *r, double *x,
const double *b,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
hipLaunchKernelGGL(( daxpycp2_kernel) , dim3(grid), dim3(threads), 0, queue , m, r, x, b );
}
extern "C" void
magmablas_daxpycp2(
magma_int_t m, double *r, double *x,
const double *b)
{
magmablas_daxpycp2_q( m, r, x, b, magma_stream );
}
| 7cf29ba746190b830fca2c3f5b74ea771aa1f4ad.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgerbt.cu normal z -> d, Fri Jan 30 19:00:08 2015
@author Adrien REMY
*/
#include "common_magma.h"
#include "dgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    DPRBT_MVT computes B = U^T*B to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du DOUBLE_PRECISION array, dimension (n,2)
                The 2*n vector representing the random butterfly matrix U
@param[in,out]
db DOUBLE_PRECISION array, dimension (n)
The n vector db computed by DGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_dprbt_mtv_q(
magma_int_t n,
double *du, double *db,
magma_queue_t queue)
{
/*
*/
magma_int_t threads = block_length;
magma_int_t grid = n/(4*block_length) + ((n%(4*block_length))!=0);
magmablas_dapply_transpose_vector_kernel<<< grid, threads, 0, queue >>>(n/2, du, n, db, 0);
magmablas_dapply_transpose_vector_kernel<<< grid, threads, 0, queue >>>(n/2, du, n+n/2, db, n/2);
threads = block_length;
grid = n/(2*block_length) + ((n%(2*block_length))!=0);
magmablas_dapply_transpose_vector_kernel<<< grid, threads, 0, queue >>>(n, du, 0, db, 0);
}
/**
@see magmablas_dprbt_mtv_q
********************************************************************/
extern "C" void
magmablas_dprbt_mtv(
magma_int_t n,
double *du, double *db)
{
magmablas_dprbt_mtv_q(n, du, db, magma_stream);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    DPRBT_MV computes B = V*B to obtain the non-randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db DOUBLE_PRECISION array, dimension (n)
The n vector db computed by DGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv DOUBLE_PRECISION array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_dprbt_mv_q(
magma_int_t n,
double *dv, double *db,
magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t grid = n/(2*block_length) + ((n%(2*block_length))!=0);
magmablas_dapply_vector_kernel<<< grid, threads, 0, queue >>>(n, dv, 0, db, 0);
threads = block_length;
grid = n/(4*block_length) + ((n%(4*block_length))!=0);
magmablas_dapply_vector_kernel<<< grid, threads, 0, queue >>>(n/2, dv, n, db, 0);
magmablas_dapply_vector_kernel<<< grid, threads, 0, queue >>>(n/2, dv, n+n/2, db, n/2);
}
/**
    @see magmablas_dprbt_mv_q
********************************************************************/
extern "C" void
magmablas_dprbt_mv(
magma_int_t n,
double *dv, double *db)
{
magmablas_dprbt_mv_q(n, dv, db, magma_stream);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
    DPRBT randomizes a square general matrix using a partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA DOUBLE_PRECISION array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du DOUBLE_PRECISION array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv DOUBLE_PRECISION array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_dprbt_q(
magma_int_t n,
double *dA, magma_int_t ldda,
double *du, double *dv,
magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid(n/(4*block_height) + ((n%(4*block_height))!=0),
n/(4*block_width) + ((n%(4*block_width))!=0));
magmablas_delementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, 0, ldda, du, 0, dv, 0);
magmablas_delementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, ldda*n/2, ldda, du, 0, dv, n/2);
magmablas_delementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, n/2, ldda, du, n/2, dv, 0);
magmablas_delementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2(n/(2*block_height) + ((n%(2*block_height))!=0),
n/(2*block_width) + ((n%(2*block_width))!=0));
magmablas_delementary_multiplication_kernel<<< grid2, threads2, 0, queue >>>(n, dA, 0, ldda, du, -ldda, dv, -ldda);
}
/**
@see magmablas_dprbt_q
********************************************************************/
extern "C" void
magmablas_dprbt(
magma_int_t n,
double *dA, magma_int_t ldda,
double *du, double *dv)
{
magmablas_dprbt_q(n, dA, ldda, du, dv, magma_stream);
}
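/*
    Typical call sequence for the routines above (a sketch inferred from the
    documentation in this file, not part of the original source):

        magmablas_dprbt    (n, dA, ldda, du, dv);  // A <- U^T * A * V
        magmablas_dprbt_mtv(n, du, db);            // b <- U^T * b
        // solve the randomized system with a non-pivoted solver
        // (e.g. DGESV_NOPIV_GPU), overwriting db with the solution
        magmablas_dprbt_mv (n, dv, db);            // recover x = V * db
*/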
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
daxpycp2_kernel(
int m, double *r, double *x,
const double *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_D_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
extern "C" void
magmablas_daxpycp2_q(
magma_int_t m, double *r, double *x,
const double *b,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
daxpycp2_kernel <<< grid, threads, 0, queue >>> ( m, r, x, b );
}
extern "C" void
magmablas_daxpycp2(
magma_int_t m, double *r, double *x,
const double *b)
{
magmablas_daxpycp2_q( m, r, x, b, magma_stream );
}
|
556ebdf6e60295675b00f45d0d6496e7661c621b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv) {
double *buf_d = NULL;
fprintf(stderr, "Allocating...\n");
hipMalloc((void **) &buf_d, sizeof(double) * 1024);
fprintf(stderr, "Allocating DONE.\n");
return 0;
}
| 556ebdf6e60295675b00f45d0d6496e7661c621b.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
int main(int argc, char **argv) {
double *buf_d = NULL;
fprintf(stderr, "Allocating...\n");
cudaMalloc((void **) &buf_d, sizeof(double) * 1024);
fprintf(stderr, "Allocating DONE.\n");
return 0;
}
|
7d8a29b570c8014d2a988245d6afb0cd03b8c426.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include <Vectormath.hpp>
#include <Manifoldmath.hpp>
#include <vector>
#include <memory>
#include <cmath>
#include <iostream>
#include <stdio.h>
// CUDA Version
namespace Engine
{
namespace Manifoldmath
{
scalar norm(const vectorfield & vf)
{
scalar x = Vectormath::dot(vf, vf);
return std::sqrt(x);
}
void normalize(vectorfield & vf)
{
scalar sc = 1.0/norm(vf);
Vectormath::scale(vf, sc);
}
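        /* Note: project_parallel() keeps only the component of vf1 along vf2,
           while project_orthogonal() removes it. Both use the plain dot product
           as the projection coefficient, so they are exact parallel/orthogonal
           projections only if vf2 is normalised in the full 3N-dimensional
           sense, i.e. Vectormath::dot(vf2, vf2) == 1. */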
void project_parallel(vectorfield & vf1, const vectorfield & vf2)
{
vectorfield vf3 = vf1;
project_orthogonal(vf3, vf2);
Vectormath::add_c_a(-1, vf3, vf1);
}
__global__ void cu_project_orthogonal(Vector3 *vf1, const Vector3 *vf2, scalar proj, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
vf1[idx] -= proj*vf2[idx];
}
}
// The wrapper for the calling of the actual kernel
void project_orthogonal(vectorfield & vf1, const vectorfield & vf2)
{
int n = vf1.size();
// Get projection
scalar proj=Vectormath::dot(vf1, vf2);
// Project vf1
hipLaunchKernelGGL(( cu_project_orthogonal), dim3((n+1023)/1024), dim3(1024), 0, 0, vf1.data(), vf2.data(), proj, n);
CU_CHECK_AND_SYNC();
}
void invert_parallel(vectorfield & vf1, const vectorfield & vf2)
{
scalar proj=Vectormath::dot(vf1, vf2);
Vectormath::add_c_a(-2*proj, vf2, vf1);
}
void invert_orthogonal(vectorfield & vf1, const vectorfield & vf2)
{
vectorfield vf3 = vf1;
project_orthogonal(vf3, vf2);
Vectormath::add_c_a(-2, vf3, vf1);
}
__global__ void cu_project_tangential(Vector3 *vf1, const Vector3 *vf2, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
vf1[idx] -= vf1[idx].dot(vf2[idx]) * vf2[idx];
}
}
void project_tangential(vectorfield & vf1, const vectorfield & vf2)
{
int n = vf1.size();
hipLaunchKernelGGL(( cu_project_tangential), dim3((n+1023)/1024), dim3(1024), 0, 0, vf1.data(), vf2.data(), n);
CU_CHECK_AND_SYNC();
}
__inline__ __device__
scalar cu_dist_greatcircle(const Vector3 v1, const Vector3 v2)
{
scalar r = v1.dot(v2);
// Prevent NaNs from occurring
r = max(-1.0, min(1.0, r));
// Greatcircle distance
return std::acos(r);
}
scalar dist_greatcircle(const Vector3 & v1, const Vector3 & v2)
{
scalar r = v1.dot(v2);
// Prevent NaNs from occurring
r = max(-1.0, min(1.0, r));
// Greatcircle distance
return std::acos(r);
}
// Calculates the squares of the geodesic distances between vectors of two vectorfields
__global__ void cu_dist_geodesic_2(const Vector3 * vf1, const Vector3 * vf2, scalar * sf, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
scalar d = cu_dist_greatcircle(vf1[idx], vf2[idx]);
sf[idx] = d*d;
}
}
scalar dist_geodesic(const vectorfield & vf1, const vectorfield & vf2)
{
int n = vf1.size();
scalarfield sf(n);
hipLaunchKernelGGL(( cu_dist_geodesic_2), dim3((n+1023)/1024), dim3(1024), 0, 0, vf1.data(), vf2.data(), sf.data(), n);
CU_CHECK_AND_SYNC();
scalar dist = Vectormath::sum(sf);
return sqrt(dist);
}
/*
        Calculates the 'tangent' vectors, i.e. in the crudest approximation the difference between an image and its neighbouring images
*/
void Tangents(std::vector<std::shared_ptr<vectorfield>> configurations, const std::vector<scalar> & energies, std::vector<vectorfield> & tangents)
{
int noi = configurations.size();
int nos = (*configurations[0]).size();
for (int idx_img = 0; idx_img < noi; ++idx_img)
{
auto& image = *configurations[idx_img];
// First Image
if (idx_img == 0)
{
auto& image_plus = *configurations[idx_img + 1];
Vectormath::set_c_a( 1, image_plus, tangents[idx_img]);
Vectormath::add_c_a(-1, image, tangents[idx_img]);
}
// Last Image
else if (idx_img == noi - 1)
{
auto& image_minus = *configurations[idx_img - 1];
Vectormath::set_c_a( 1, image, tangents[idx_img]);
Vectormath::add_c_a(-1, image_minus, tangents[idx_img]);
}
// Images Inbetween
else
{
auto& image_plus = *configurations[idx_img + 1];
auto& image_minus = *configurations[idx_img - 1];
// Energies
scalar E_mid = 0, E_plus = 0, E_minus = 0;
E_mid = energies[idx_img];
E_plus = energies[idx_img + 1];
E_minus = energies[idx_img - 1];
// Vectors to neighbouring images
vectorfield t_plus(nos), t_minus(nos);
Vectormath::set_c_a( 1, image_plus, t_plus);
Vectormath::add_c_a(-1, image, t_plus);
Vectormath::set_c_a( 1, image, t_minus);
Vectormath::add_c_a(-1, image_minus, t_minus);
// Near maximum or minimum
if ((E_plus < E_mid && E_mid > E_minus) || (E_plus > E_mid && E_mid < E_minus))
{
// Get a smooth transition between forward and backward tangent
scalar E_max = ::max(std::abs(E_plus - E_mid), std::abs(E_minus - E_mid));
scalar E_min = ::min(std::abs(E_plus - E_mid), std::abs(E_minus - E_mid));
if (E_plus > E_minus)
{
Vectormath::set_c_a(E_max, t_plus, tangents[idx_img]);
Vectormath::add_c_a(E_min, t_minus, tangents[idx_img]);
}
else
{
Vectormath::set_c_a(E_min, t_plus, tangents[idx_img]);
Vectormath::add_c_a(E_max, t_minus, tangents[idx_img]);
}
}
// Rising slope
else if (E_plus > E_mid && E_mid > E_minus)
{
Vectormath::set_c_a(1, t_plus, tangents[idx_img]);
}
// Falling slope
else if (E_plus < E_mid && E_mid < E_minus)
{
Vectormath::set_c_a(1, t_minus, tangents[idx_img]);
//tangents = t_minus;
for (int i = 0; i < nos; ++i)
{
tangents[idx_img][i] = t_minus[i];
}
}
// No slope(constant energy)
else
{
Vectormath::set_c_a(1, t_plus, tangents[idx_img]);
Vectormath::add_c_a(1, t_minus, tangents[idx_img]);
}
}
// Project tangents into tangent planes of spin vectors to make them actual tangents
project_tangential(tangents[idx_img], image);
// Normalise in 3N - dimensional space
Manifoldmath::normalize(tangents[idx_img]);
}// end for idx_img
}// end Tangents
}
}
#endif | 7d8a29b570c8014d2a988245d6afb0cd03b8c426.cu | #ifdef USE_CUDA
#include <Vectormath.hpp>
#include <Manifoldmath.hpp>
#include <vector>
#include <memory>
#include <cmath>
#include <iostream>
#include <stdio.h>
// CUDA Version
namespace Engine
{
namespace Manifoldmath
{
scalar norm(const vectorfield & vf)
{
scalar x = Vectormath::dot(vf, vf);
return std::sqrt(x);
}
void normalize(vectorfield & vf)
{
scalar sc = 1.0/norm(vf);
Vectormath::scale(vf, sc);
}
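        /* Note: project_parallel() keeps only the component of vf1 along vf2,
           while project_orthogonal() removes it. Both use the plain dot product
           as the projection coefficient, so they are exact parallel/orthogonal
           projections only if vf2 is normalised in the full 3N-dimensional
           sense, i.e. Vectormath::dot(vf2, vf2) == 1. */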
void project_parallel(vectorfield & vf1, const vectorfield & vf2)
{
vectorfield vf3 = vf1;
project_orthogonal(vf3, vf2);
Vectormath::add_c_a(-1, vf3, vf1);
}
__global__ void cu_project_orthogonal(Vector3 *vf1, const Vector3 *vf2, scalar proj, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
vf1[idx] -= proj*vf2[idx];
}
}
// The wrapper for the calling of the actual kernel
void project_orthogonal(vectorfield & vf1, const vectorfield & vf2)
{
int n = vf1.size();
// Get projection
scalar proj=Vectormath::dot(vf1, vf2);
// Project vf1
cu_project_orthogonal<<<(n+1023)/1024, 1024>>>(vf1.data(), vf2.data(), proj, n);
CU_CHECK_AND_SYNC();
}
void invert_parallel(vectorfield & vf1, const vectorfield & vf2)
{
scalar proj=Vectormath::dot(vf1, vf2);
Vectormath::add_c_a(-2*proj, vf2, vf1);
}
void invert_orthogonal(vectorfield & vf1, const vectorfield & vf2)
{
vectorfield vf3 = vf1;
project_orthogonal(vf3, vf2);
Vectormath::add_c_a(-2, vf3, vf1);
}
__global__ void cu_project_tangential(Vector3 *vf1, const Vector3 *vf2, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
vf1[idx] -= vf1[idx].dot(vf2[idx]) * vf2[idx];
}
}
void project_tangential(vectorfield & vf1, const vectorfield & vf2)
{
int n = vf1.size();
cu_project_tangential<<<(n+1023)/1024, 1024>>>(vf1.data(), vf2.data(), n);
CU_CHECK_AND_SYNC();
}
__inline__ __device__
scalar cu_dist_greatcircle(const Vector3 v1, const Vector3 v2)
{
scalar r = v1.dot(v2);
// Prevent NaNs from occurring
r = max(-1.0, min(1.0, r));
// Greatcircle distance
return std::acos(r);
}
scalar dist_greatcircle(const Vector3 & v1, const Vector3 & v2)
{
scalar r = v1.dot(v2);
// Prevent NaNs from occurring
r = max(-1.0, min(1.0, r));
// Greatcircle distance
return std::acos(r);
}
// Calculates the squares of the geodesic distances between vectors of two vectorfields
__global__ void cu_dist_geodesic_2(const Vector3 * vf1, const Vector3 * vf2, scalar * sf, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
{
scalar d = cu_dist_greatcircle(vf1[idx], vf2[idx]);
sf[idx] = d*d;
}
}
scalar dist_geodesic(const vectorfield & vf1, const vectorfield & vf2)
{
int n = vf1.size();
scalarfield sf(n);
cu_dist_geodesic_2<<<(n+1023)/1024, 1024>>>(vf1.data(), vf2.data(), sf.data(), n);
CU_CHECK_AND_SYNC();
scalar dist = Vectormath::sum(sf);
return sqrt(dist);
}
/*
        Calculates the 'tangent' vectors, i.e. in the crudest approximation the difference between an image and its neighbouring images
*/
void Tangents(std::vector<std::shared_ptr<vectorfield>> configurations, const std::vector<scalar> & energies, std::vector<vectorfield> & tangents)
{
int noi = configurations.size();
int nos = (*configurations[0]).size();
for (int idx_img = 0; idx_img < noi; ++idx_img)
{
auto& image = *configurations[idx_img];
// First Image
if (idx_img == 0)
{
auto& image_plus = *configurations[idx_img + 1];
Vectormath::set_c_a( 1, image_plus, tangents[idx_img]);
Vectormath::add_c_a(-1, image, tangents[idx_img]);
}
// Last Image
else if (idx_img == noi - 1)
{
auto& image_minus = *configurations[idx_img - 1];
Vectormath::set_c_a( 1, image, tangents[idx_img]);
Vectormath::add_c_a(-1, image_minus, tangents[idx_img]);
}
// Images Inbetween
else
{
auto& image_plus = *configurations[idx_img + 1];
auto& image_minus = *configurations[idx_img - 1];
// Energies
scalar E_mid = 0, E_plus = 0, E_minus = 0;
E_mid = energies[idx_img];
E_plus = energies[idx_img + 1];
E_minus = energies[idx_img - 1];
// Vectors to neighbouring images
vectorfield t_plus(nos), t_minus(nos);
Vectormath::set_c_a( 1, image_plus, t_plus);
Vectormath::add_c_a(-1, image, t_plus);
Vectormath::set_c_a( 1, image, t_minus);
Vectormath::add_c_a(-1, image_minus, t_minus);
// Near maximum or minimum
if ((E_plus < E_mid && E_mid > E_minus) || (E_plus > E_mid && E_mid < E_minus))
{
// Get a smooth transition between forward and backward tangent
scalar E_max = std::max(std::abs(E_plus - E_mid), std::abs(E_minus - E_mid));
scalar E_min = std::min(std::abs(E_plus - E_mid), std::abs(E_minus - E_mid));
if (E_plus > E_minus)
{
Vectormath::set_c_a(E_max, t_plus, tangents[idx_img]);
Vectormath::add_c_a(E_min, t_minus, tangents[idx_img]);
}
else
{
Vectormath::set_c_a(E_min, t_plus, tangents[idx_img]);
Vectormath::add_c_a(E_max, t_minus, tangents[idx_img]);
}
}
// Rising slope
else if (E_plus > E_mid && E_mid > E_minus)
{
Vectormath::set_c_a(1, t_plus, tangents[idx_img]);
}
// Falling slope
else if (E_plus < E_mid && E_mid < E_minus)
{
Vectormath::set_c_a(1, t_minus, tangents[idx_img]);
//tangents = t_minus;
for (int i = 0; i < nos; ++i)
{
tangents[idx_img][i] = t_minus[i];
}
}
// No slope(constant energy)
else
{
Vectormath::set_c_a(1, t_plus, tangents[idx_img]);
Vectormath::add_c_a(1, t_minus, tangents[idx_img]);
}
}
// Project tangents into tangent planes of spin vectors to make them actual tangents
project_tangential(tangents[idx_img], image);
// Normalise in 3N - dimensional space
Manifoldmath::normalize(tangents[idx_img]);
}// end for idx_img
}// end Tangents
}
}
#endif |
41cd7ffced84f486cd8205df8508bb3009b21769.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cstdlib>
#include <random>
#include <vector>
#include "k2/csrc/fsa.h"
#include "k2/csrc/host/fsa_util.h"
#include "k2/csrc/host_shim.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/test_utils.h"
namespace k2 {
Array1<int32_t> GenerateRandomIndexes(ContextPtr context, bool allow_minus_one,
int32_t dim, int32_t max_value) {
std::vector<int32_t> indexes(dim);
int32_t start = allow_minus_one ? -1 : 0;
for (int32_t &i : indexes) {
int32_t tmp = RandInt(-max_value, max_value);
i = ::max(tmp, start);
}
return Array1<int32_t>(context, indexes);
}
} // namespace k2
| 41cd7ffced84f486cd8205df8508bb3009b21769.cu | /**
* Copyright 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cstdlib>
#include <random>
#include <vector>
#include "k2/csrc/fsa.h"
#include "k2/csrc/host/fsa_util.h"
#include "k2/csrc/host_shim.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/test_utils.h"
namespace k2 {
Array1<int32_t> GenerateRandomIndexes(ContextPtr context, bool allow_minus_one,
int32_t dim, int32_t max_value) {
std::vector<int32_t> indexes(dim);
int32_t start = allow_minus_one ? -1 : 0;
for (int32_t &i : indexes) {
int32_t tmp = RandInt(-max_value, max_value);
i = std::max(tmp, start);
}
return Array1<int32_t>(context, indexes);
}
} // namespace k2
|
fa8d8698e8aa9b406c17a56c2f806a3af8c4bc2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start); \
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
#define DEBUG
#define HIST_SIZE 256
#define SCAN_SIZE HIST_SIZE*2
unsigned char *input_gpu;
//unsigned char *output_gpu;
float *cdf;
float *hist_array;
double CLOCK() {
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
return (t.tv_sec * 1000)+(t.tv_nsec*1e-6);
}
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
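// kernel_hist builds a privatized 256-bin histogram per thread block in shared
// memory (TILE_SIZE*TILE_SIZE == 256, so each thread owns exactly one bin),
// accumulates its pixel with a shared-memory atomicAdd, and then merges the
// block's copy into the global histogram with one global atomicAdd per bin.
// Threads are mapped to pixels in a strided ("sparse") pattern; that mapping
// is the only difference from kernel_hist3 below.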
// sparse histogram with shared memory
__global__ void kernel_hist(unsigned char *input,
float *hist, unsigned int height, unsigned int width){
//int x = blockIdx.x*TILE_SIZE+threadIdx.x;
//int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int interval_x = (width - 1) / TILE_SIZE + 1;
int interval_y = (height - 1) / TILE_SIZE + 1;
int y = threadIdx.y*interval_y+blockIdx.y;
int x = threadIdx.x*interval_x+blockIdx.x;
//int location = y*TILE_SIZE*gridDim.x+x;
//int location = y*width+x;
int location = y*width + x;
int block_loc = threadIdx.y*TILE_SIZE+threadIdx.x;
// HIST_SIZE 256
__shared__ unsigned int hist_shared[HIST_SIZE];
/*if (block_loc<HIST_SIZE) hist_shared[block_loc]=0;
__syncthreads();*/
hist_shared[block_loc]=0;
__syncthreads();
if (x<width && y<height) atomicAdd(&(hist_shared[input[location]]),1);
__syncthreads();
//if (block_loc<HIST_SIZE) {
atomicAdd(&(hist[block_loc]),(float)hist_shared[block_loc]);
//}
}
//normal histogram with shared memory
__global__ void kernel_hist3(unsigned char *input,
float *hist, unsigned int height, unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
// int interval_x = (width - 1) / TILE_SIZE + 1;
// int interval_y = (height - 1) / TILE_SIZE + 1;
// int y = threadIdx.y*interval_y+blockIdx.y;
// int x = threadIdx.x*interval_x+blockIdx.x;
// int location = y*TILE_SIZE*gridDim.x+x;
// int location = y*width+x;
int location = y*width + x;
int block_loc = threadIdx.y*TILE_SIZE+threadIdx.x;
// HIST_SIZE 256
__shared__ unsigned int hist_shared[HIST_SIZE];
/*if (block_loc<HIST_SIZE) hist_shared[block_loc]=0;
__syncthreads();*/
hist_shared[block_loc]=0;
__syncthreads();
if (x<width && y<height) atomicAdd(&(hist_shared[input[location]]),1);
__syncthreads();
//if (block_loc<HIST_SIZE) {
atomicAdd(&(hist[block_loc]),(float)hist_shared[block_loc]);
//}
}
__global__ void kernel_hist_global(unsigned char *input,
float *hist, unsigned int height, unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
// int block_loc = threadIdx.y*TILE_SIZE+threadIdx.x;
// HIST_SIZE 256
//__shared__ unsigned int hist_shared[HIST_SIZE];
if (x<width && y<height) atomicAdd(&(hist[input[location]]),1);
__syncthreads();
}
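// kernel_cdf normalizes the 256 histogram counts by the total pixel count and
// converts them in place into a cumulative distribution function using a
// work-efficient shared-memory scan: the first loop is the up-sweep
// (reduction) phase, the second loop is the down-sweep that completes an
// inclusive prefix sum. It is launched with a single block of HIST_SIZE
// threads, one thread per bin.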
__global__ void kernel_cdf(float *hist_array, int size){
__shared__ float p[HIST_SIZE];
int tid=blockIdx.x*blockDim.x+threadIdx.x;
if (tid<HIST_SIZE){
p[tid]=hist_array[tid] / (float)size;
}
__syncthreads();
for (int i=1; i<=HIST_SIZE;i=i<<1){
int ai=(threadIdx.x+1)*i*2-1;
if (ai<HIST_SIZE) p[ai]+=p[ai-i];
__syncthreads();
}
for (int i=HIST_SIZE/2;i>0;i=i>>1){
int bi=(threadIdx.x+1)*i*2-1;
if (bi+i<HIST_SIZE) p[bi+i]+=p[bi];
__syncthreads();
}
if (tid<HIST_SIZE) hist_array[tid]=p[threadIdx.x];
}
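// kernel_equlization remaps every pixel through the CDF using the standard
// histogram-equalization transfer function
//     value = 255 * (cdf[v] - cdf[0]) / (1 - cdf[0]),
// clamped to [0, 255]; cdf[0] is taken as the minimum CDF value.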
__global__ void kernel_equlization(
unsigned char *input,
float * cdf, unsigned int height, unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
float min=cdf[0]; float down=0.0F; float up=255.0F;
if (x<width && y<height) {
float value=255.0F * (cdf[input[location]]-min) / (1.0F-min);
if (value<down) value=down;
if (value>up) value=up;
input[location]=(unsigned char) value;
}
}
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width){
//float cdf_cpu[HIST_SIZE]={0};
//unsigned int hist_cpu[HIST_SIZE]={0};
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
int block_size2=256;
int grid_size2=1+((width*height-1) / block_size2);
hipHostRegister((void*)data, size*sizeof(unsigned char),0);
// Allocate arrays in GPU memory
// float Ktime00;
// TIMER_CREATE(Ktime00);
// TIMER_START(Ktime00);
hipMalloc((void**)&input_gpu , size*sizeof(unsigned char));
hipMalloc((void**)&hist_array , HIST_SIZE*sizeof(float));
// hipMallocManaged((void**)&input_gpu , size*sizeof(unsigned char));
// hipMallocManaged((void**)&hist_array , HIST_SIZE*sizeof(float));
// hipHostRegister((void*)&input_gpu , size*sizeof(unsigned char),0);
// hipHostRegister((void*)&hist_array , HIST_SIZE*sizeof(float),0);
// TIMER_END(Ktime00);
// printf("CUDA_MALLOC Execution Time: %f ms\n", Ktime00);
// hipMalloc((void**)&output_gpu , size*sizeof(unsigned char));
// checkCuda(hipMalloc((void**)&cdf , size*sizeof(float)));
// init output_gpu to 0
//checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
hipMemset(hist_array , 0 , HIST_SIZE*sizeof(float));
// checkCuda(hipMemset(cdf , 0 , HIST_SIZE*sizeof(float)));
// Copy data to GPU
hipMemcpy(input_gpu,
data,
size*sizeof(char),
hipMemcpyHostToDevice);
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
//hist2
dim3 dimGrid2(grid_size2);
dim3 dimBlock2(block_size2);
dim3 dimCdfGrid(1);
dim3 dimCdfBlock(HIST_SIZE);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// float Ktime0;
// TIMER_CREATE(Ktime0);
// TIMER_START(Ktime0);
hipLaunchKernelGGL(( kernel_hist), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
hist_array,
height,
width);
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
//TIMER_END(Ktime0);
//printf("HIST Kernel Execution Time: %f ms\n", Ktime0);
// float Ktime1;
// TIMER_CREATE(Ktime1);
// TIMER_START(Ktime1);
hipLaunchKernelGGL(( kernel_cdf), dim3(dimCdfGrid),dim3(dimCdfBlock), 0, 0,
hist_array,
height*width);
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
// TIMER_END(Ktime1);
// printf("CDF Kernel Execution Time: %f ms\n", Ktime1);
// float Ktime2;
// TIMER_CREATE(Ktime2);
// TIMER_START(Ktime2);
hipLaunchKernelGGL(( kernel_equlization), dim3(dimGrid), dim3(dimBlock), 0, 0,
input_gpu,
hist_array,
height,
width);
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
// TIMER_END(Ktime2);
// printf("EQUALIZATION Kernel Execution Time: %f ms\n", Ktime2);
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(hipMemcpy(data,
input_gpu,
size*sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Free resources and end the program
// checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
checkCuda(hipFree(hist_array));
}
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width){
//float cdf_cpu[HIST_SIZE]={0};
//unsigned int hist_cpu[HIST_SIZE]={0};
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
int block_size2=256;
int grid_size2=1+((width*height-1) / block_size2);
// Allocate arrays in GPU memory
// float Ktime00;
// TIMER_CREATE(Ktime00);
// TIMER_START(Ktime00);
hipHostRegister((void*)data, size*sizeof(unsigned char),0);
hipMalloc((void**)&input_gpu , size*sizeof(unsigned char));
hipMalloc((void**)&hist_array , HIST_SIZE*sizeof(float));
// hipMallocManaged((void**)&input_gpu , size*sizeof(unsigned char));
// hipMallocManaged((void**)&hist_array , HIST_SIZE*sizeof(float));
// hipHostRegister((void*)&input_gpu , size*sizeof(unsigned char),0);
// hipHostRegister((void*)&hist_array , HIST_SIZE*sizeof(float),0);
// TIMER_END(Ktime00);
//printf("CUDA_MALLOC Execution Time: %f ms\n", Ktime00);
// hipMalloc((void**)&output_gpu , size*sizeof(unsigned char));
// checkCuda(hipMalloc((void**)&cdf , size*sizeof(float)));
// init output_gpu to 0
//checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
hipMemset(hist_array , 0 , HIST_SIZE*sizeof(float));
// checkCuda(hipMemset(cdf , 0 , HIST_SIZE*sizeof(float)));
// Copy data to GPU
hipMemcpy(input_gpu,
data,
size*sizeof(char),
hipMemcpyHostToDevice);
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
//hist2
dim3 dimGrid2(grid_size2);
dim3 dimBlock2(block_size2);
dim3 dimCdfGrid(1);
dim3 dimCdfBlock(HIST_SIZE);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// float Ktime0;
// TIMER_CREATE(Ktime0);
// TIMER_START(Ktime0);
hipLaunchKernelGGL(( kernel_hist), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
hist_array,
height,
width);
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
// TIMER_END(Ktime0);
//printf("HIST Kernel Execution Time: %f ms\n", Ktime0);
// float Ktime1;
// TIMER_CREATE(Ktime1);
// TIMER_START(Ktime1);
hipLaunchKernelGGL(( kernel_cdf), dim3(dimCdfGrid),dim3(dimCdfBlock), 0, 0,
hist_array,
height*width);
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
// TIMER_END(Ktime1);
//printf("CDF Kernel Execution Time: %f ms\n", Ktime1);
// float Ktime2;
// TIMER_CREATE(Ktime2);
// TIMER_START(Ktime2);
hipLaunchKernelGGL(( kernel_equlization), dim3(dimGrid), dim3(dimBlock), 0, 0,
input_gpu,
hist_array,
height,
width);
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
// TIMER_END(Ktime2);
// printf("EQUALIZATION Kernel Execution Time: %f ms\n", Ktime2);
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
//printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(hipMemcpy(data,
input_gpu,
size*sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Free resources and end the program
// checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
checkCuda(hipFree(hist_array));
}
| fa8d8698e8aa9b406c17a56c2f806a3af8c4bc2c.cu |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start);
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
#define DEBUG
#define HIST_SIZE 256
#define SCAN_SIZE HIST_SIZE*2
unsigned char *input_gpu;
//unsigned char *output_gpu;
float *cdf;
float *hist_array;
double CLOCK() {
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
return (t.tv_sec * 1000)+(t.tv_nsec*1e-6);
}
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// sparse histogram with shared memory
__global__ void kernel_hist(unsigned char *input,
float *hist, unsigned int height, unsigned int width){
//int x = blockIdx.x*TILE_SIZE+threadIdx.x;
//int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int interval_x = (width - 1) / TILE_SIZE + 1;
int interval_y = (height - 1) / TILE_SIZE + 1;
int y = threadIdx.y*interval_y+blockIdx.y;
int x = threadIdx.x*interval_x+blockIdx.x;
//int location = y*TILE_SIZE*gridDim.x+x;
//int location = y*width+x;
int location = y*width + x;
int block_loc = threadIdx.y*TILE_SIZE+threadIdx.x;
// HIST_SIZE 256
__shared__ unsigned int hist_shared[HIST_SIZE];
/*if (block_loc<HIST_SIZE) hist_shared[block_loc]=0;
__syncthreads();*/
hist_shared[block_loc]=0;
__syncthreads();
if (x<width && y<height) atomicAdd(&(hist_shared[input[location]]),1);
__syncthreads();
//if (block_loc<HIST_SIZE) {
atomicAdd(&(hist[block_loc]),(float)hist_shared[block_loc]);
//}
}
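// Note on the two shared-memory variants: kernel_hist above strides each thread across
// the image (thread (tx, ty) of block (bx, by) reads pixel (tx*interval_x + bx,
// ty*interval_y + by)), while kernel_hist3 below uses the conventional tiling (each
// block covers a contiguous TILE_SIZE x TILE_SIZE tile). Both privatize a HIST_SIZE-bin
// histogram in shared memory and flush it with one atomicAdd per bin per block; only
// the global-memory access pattern differs.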
// normal histogram with shared memory
__global__ void kernel_hist3(unsigned char *input,
float *hist, unsigned int height, unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
// int interval_x = (width - 1) / TILE_SIZE + 1;
// int interval_y = (height - 1) / TILE_SIZE + 1;
// int y = threadIdx.y*interval_y+blockIdx.y;
// int x = threadIdx.x*interval_x+blockIdx.x;
// int location = y*TILE_SIZE*gridDim.x+x;
// int location = y*width+x;
int location = y*width + x;
int block_loc = threadIdx.y*TILE_SIZE+threadIdx.x;
// HIST_SIZE 256
__shared__ unsigned int hist_shared[HIST_SIZE];
/*if (block_loc<HIST_SIZE) hist_shared[block_loc]=0;
__syncthreads();*/
hist_shared[block_loc]=0;
__syncthreads();
if (x<width && y<height) atomicAdd(&(hist_shared[input[location]]),1);
__syncthreads();
//if (block_loc<HIST_SIZE) {
atomicAdd(&(hist[block_loc]),(float)hist_shared[block_loc]);
//}
}
__global__ void kernel_hist_global(unsigned char *input,
float *hist, unsigned int height, unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
// int block_loc = threadIdx.y*TILE_SIZE+threadIdx.x;
// HIST_SIZE 256
//__shared__ unsigned int hist_shared[HIST_SIZE];
if (x<width && y<height) atomicAdd(&(hist[input[location]]),1);
__syncthreads();
}
__global__ void kernel_cdf(float *hist_array, int size){
__shared__ float p[HIST_SIZE];
int tid=blockIdx.x*blockDim.x+threadIdx.x;
if (tid<HIST_SIZE){
p[tid]=hist_array[tid] / (float)size;
}
__syncthreads();
for (int i=1; i<=HIST_SIZE;i=i<<1){
int ai=(threadIdx.x+1)*i*2-1;
if (ai<HIST_SIZE) p[ai]+=p[ai-i];
__syncthreads();
}
for (int i=HIST_SIZE/2;i>0;i=i>>1){
int bi=(threadIdx.x+1)*i*2-1;
if (bi+i<HIST_SIZE) p[bi+i]+=p[bi];
__syncthreads();
}
if (tid<HIST_SIZE) hist_array[tid]=p[threadIdx.x];
}
__global__ void kernel_equlization(
unsigned char *input,
float * cdf, unsigned int height, unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
float min=cdf[0]; float down=0.0F; float up=255.0F;
if (x<width && y<height) {
float value=255.0F * (cdf[input[location]]-min) / (1.0F-min);
if (value<down) value=down;
if (value>up) value=up;
input[location]=(unsigned char) value;
}
}
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width){
//float cdf_cpu[HIST_SIZE]={0};
//unsigned int hist_cpu[HIST_SIZE]={0};
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
int block_size2=256;
int grid_size2=1+((width*height-1) / block_size2);
cudaHostRegister((void*)data, size*sizeof(unsigned char),0);
// Allocate arrays in GPU memory
// float Ktime00;
// TIMER_CREATE(Ktime00);
// TIMER_START(Ktime00);
cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char));
cudaMalloc((void**)&hist_array , HIST_SIZE*sizeof(float));
// cudaMallocManaged((void**)&input_gpu , size*sizeof(unsigned char));
// cudaMallocManaged((void**)&hist_array , HIST_SIZE*sizeof(float));
// cudaHostRegister((void*)&input_gpu , size*sizeof(unsigned char),0);
// cudaHostRegister((void*)&hist_array , HIST_SIZE*sizeof(float),0);
// TIMER_END(Ktime00);
// printf("CUDA_MALLOC Execution Time: %f ms\n", Ktime00);
// cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char));
// checkCuda(cudaMalloc((void**)&cdf , size*sizeof(float)));
// init output_gpu to 0
//checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
cudaMemset(hist_array , 0 , HIST_SIZE*sizeof(float));
// checkCuda(cudaMemset(cdf , 0 , HIST_SIZE*sizeof(float)));
// Copy data to GPU
cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice);
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
//hist2
dim3 dimGrid2(grid_size2);
dim3 dimBlock2(block_size2);
dim3 dimCdfGrid(1);
dim3 dimCdfBlock(HIST_SIZE);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// float Ktime0;
// TIMER_CREATE(Ktime0);
// TIMER_START(Ktime0);
kernel_hist<<<dimGrid, dimBlock>>>(input_gpu,
hist_array,
height,
width);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
//TIMER_END(Ktime0);
//printf("HIST Kernel Execution Time: %f ms\n", Ktime0);
// float Ktime1;
// TIMER_CREATE(Ktime1);
// TIMER_START(Ktime1);
kernel_cdf<<<dimCdfGrid,dimCdfBlock>>>(
hist_array,
height*width);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
// TIMER_END(Ktime1);
// printf("CDF Kernel Execution Time: %f ms\n", Ktime1);
// float Ktime2;
// TIMER_CREATE(Ktime2);
// TIMER_START(Ktime2);
kernel_equlization<<<dimGrid, dimBlock>>>(
input_gpu,
hist_array,
height,
width);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
// TIMER_END(Ktime2);
// printf("EQUALIZATION Kernel Execution Time: %f ms\n", Ktime2);
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
input_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
// checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
checkCuda(cudaFree(hist_array));
}
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width){
//float cdf_cpu[HIST_SIZE]={0};
//unsigned int hist_cpu[HIST_SIZE]={0};
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
int block_size2=256;
int grid_size2=1+((width*height-1) / block_size2);
// Allocate arrays in GPU memory
// float Ktime00;
// TIMER_CREATE(Ktime00);
// TIMER_START(Ktime00);
cudaHostRegister((void*)data, size*sizeof(unsigned char),0);
cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char));
cudaMalloc((void**)&hist_array , HIST_SIZE*sizeof(float));
// cudaMallocManaged((void**)&input_gpu , size*sizeof(unsigned char));
// cudaMallocManaged((void**)&hist_array , HIST_SIZE*sizeof(float));
// cudaHostRegister((void*)&input_gpu , size*sizeof(unsigned char),0);
// cudaHostRegister((void*)&hist_array , HIST_SIZE*sizeof(float),0);
// TIMER_END(Ktime00);
//printf("CUDA_MALLOC Execution Time: %f ms\n", Ktime00);
// cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char));
// checkCuda(cudaMalloc((void**)&cdf , size*sizeof(float)));
// init output_gpu to 0
//checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
cudaMemset(hist_array , 0 , HIST_SIZE*sizeof(float));
// checkCuda(cudaMemset(cdf , 0 , HIST_SIZE*sizeof(float)));
// Copy data to GPU
cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice);
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
//hist2
dim3 dimGrid2(grid_size2);
dim3 dimBlock2(block_size2);
dim3 dimCdfGrid(1);
dim3 dimCdfBlock(HIST_SIZE);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// float Ktime0;
// TIMER_CREATE(Ktime0);
// TIMER_START(Ktime0);
kernel_hist<<<dimGrid, dimBlock>>>(input_gpu,
hist_array,
height,
width);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
// TIMER_END(Ktime0);
//printf("HIST Kernel Execution Time: %f ms\n", Ktime0);
// float Ktime1;
// TIMER_CREATE(Ktime1);
// TIMER_START(Ktime1);
kernel_cdf<<<dimCdfGrid,dimCdfBlock>>>(
hist_array,
height*width);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
// TIMER_END(Ktime1);
//printf("CDF Kernel Execution Time: %f ms\n", Ktime1);
// float Ktime2;
// TIMER_CREATE(Ktime2);
// TIMER_START(Ktime2);
kernel_equlization<<<dimGrid, dimBlock>>>(
input_gpu,
hist_array,
height,
width);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
// TIMER_END(Ktime2);
// printf("EQUALIZATION Kernel Execution Time: %f ms\n", Ktime2);
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
//printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
input_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
// checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
checkCuda(cudaFree(hist_array));
}
|
0f4238a12fa0790c1d2b1de3621d0e0bb2ac0ed0.hip | // !!! This is a file automatically generated by hipify!!!
/**
* The MIT License (MIT)
*
* Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "utilities/mdp_model_gpu.h"
#include "error_codes.h"
#include <stdio.h>
namespace nova {
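// Data-layout note (as implied by the size checks and copies below): S and T are dense
// host arrays of n*m*ns entries and R has n*m entries -- presumably flattened in
// state-major order -- and each d_* member is simply a device mirror of the matching
// host array with the same element count.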
int mdp_initialize_successors_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->S == nullptr) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&mdp->d_S, mdp->n * mdp->m * mdp->ns * sizeof(int)) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to allocate device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(mdp->d_S, mdp->S, mdp->n * mdp->m * mdp->ns * sizeof(int),
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to copy memory from host to device for the successor states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int mdp_uninitialize_successors_gpu(MDP *mdp)
{
if (mdp->d_S != nullptr) {
if (hipFree(mdp->d_S) != hipSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_successors_gpu]: %s\n",
"Failed to free device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_S = nullptr;
return NOVA_SUCCESS;
}
int mdp_initialize_state_transitions_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->T == nullptr) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&mdp->d_T, mdp->n * mdp->m * mdp->ns * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(mdp->d_T, mdp->T, mdp->n * mdp->m * mdp->ns * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[nova_mdp_pbvi_initialize_state_transitions]: %s\n",
"Failed to copy memory from host to device for the state transitions.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int mdp_uninitialize_state_transitions_gpu(MDP *mdp)
{
if (mdp->d_T != nullptr) {
if (hipFree(mdp->d_T) != hipSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_state_transitions_gpu]: %s\n",
"Failed to free device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_T = nullptr;
return NOVA_SUCCESS;
}
int mdp_initialize_rewards_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp->n == 0 || mdp->m == 0 || mdp->R == nullptr) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&mdp->d_R, mdp->n * mdp->m * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to allocate device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(mdp->d_R, mdp->R, mdp->n * mdp->m * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to copy memory from host to device for the rewards.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int mdp_uninitialize_rewards_gpu(MDP *mdp)
{
if (mdp->d_R != nullptr) {
if (hipFree(mdp->d_R) != hipSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_rewards_gpu]: %s\n",
"Failed to free device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_R = nullptr;
return NOVA_SUCCESS;
}
}; // namespace nova
| 0f4238a12fa0790c1d2b1de3621d0e0bb2ac0ed0.cu | /**
* The MIT License (MIT)
*
* Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "utilities/mdp_model_gpu.h"
#include "error_codes.h"
#include <stdio.h>
namespace nova {
int mdp_initialize_successors_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->S == nullptr) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&mdp->d_S, mdp->n * mdp->m * mdp->ns * sizeof(int)) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to allocate device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(mdp->d_S, mdp->S, mdp->n * mdp->m * mdp->ns * sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to copy memory from host to device for the successor states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int mdp_uninitialize_successors_gpu(MDP *mdp)
{
if (mdp->d_S != nullptr) {
if (cudaFree(mdp->d_S) != cudaSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_successors_gpu]: %s\n",
"Failed to free device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_S = nullptr;
return NOVA_SUCCESS;
}
int mdp_initialize_state_transitions_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->T == nullptr) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&mdp->d_T, mdp->n * mdp->m * mdp->ns * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(mdp->d_T, mdp->T, mdp->n * mdp->m * mdp->ns * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[nova_mdp_pbvi_initialize_state_transitions]: %s\n",
"Failed to copy memory from host to device for the state transitions.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int mdp_uninitialize_state_transitions_gpu(MDP *mdp)
{
if (mdp->d_T != nullptr) {
if (cudaFree(mdp->d_T) != cudaSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_state_transitions_gpu]: %s\n",
"Failed to free device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_T = nullptr;
return NOVA_SUCCESS;
}
int mdp_initialize_rewards_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp->n == 0 || mdp->m == 0 || mdp->R == nullptr) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&mdp->d_R, mdp->n * mdp->m * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to allocate device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(mdp->d_R, mdp->R, mdp->n * mdp->m * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to copy memory from host to device for the rewards.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int mdp_uninitialize_rewards_gpu(MDP *mdp)
{
if (mdp->d_R != nullptr) {
if (cudaFree(mdp->d_R) != cudaSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_rewards_gpu]: %s\n",
"Failed to free device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_R = nullptr;
return NOVA_SUCCESS;
}
}; // namespace nova
|
cb16cd9a2eae4f711bde9725608fbd9070bf3a62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_common.hpp"
#include <cstdio>
#include <numeric>
#include "kernel_files/pack_kernel.cuknl"
extern "C" void cuda_pack_buffers_
(int fields[NUM_FIELDS], int offsets[NUM_FIELDS], int * depth,
int * face, double * host_buffer)
{
cuda_chunk.packUnpackAllBuffers(fields, offsets, *depth, *face, 1, host_buffer);
}
extern "C" void cuda_unpack_buffers_
(int fields[NUM_FIELDS], int offsets[NUM_FIELDS], int * depth,
int * face, double * host_buffer)
{
cuda_chunk.packUnpackAllBuffers(fields, offsets, *depth, *face, 0, host_buffer);
}
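// packUnpackAllBuffers drives both directions of a halo exchange: it selects the
// device-side staging buffer and the pack/unpack kernel for the requested face, copies
// the MPI host buffer onto the device first when unpacking, launches one kernel per
// requested field (offsets[] gives each field's starting offset within the staging
// buffer), and copies the staging buffer back to the host at the end when packing.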
void CloverleafCudaChunk::packUnpackAllBuffers
(int fields[NUM_FIELDS], int offsets[NUM_FIELDS],
const int depth, const int face, const int pack,
double * host_buffer)
{
const int n_exchanged = std::accumulate(fields, fields + NUM_FIELDS, 0);
if (n_exchanged < 1)
{
return;
}
// which buffer is being used for this operation
double * device_buffer = NULL;
switch (face)
{
case CHUNK_LEFT:
device_buffer = left_buffer;
break;
case CHUNK_RIGHT:
device_buffer = right_buffer;
break;
case CHUNK_BOTTOM:
device_buffer = bottom_buffer;
break;
case CHUNK_TOP:
device_buffer = top_buffer;
break;
default:
DIE("Invalid face identifier %d passed to mpi buffer packing\n", face);
}
pack_func_t pack_kernel = NULL;
kernel_info_t kernel_info;
// set which kernel to call
if (pack)
{
switch (face)
{
case CHUNK_LEFT:
pack_kernel = &device_pack_left_buffer;
kernel_info = kernel_info_map.at("device_pack_left_buffer");
break;
case CHUNK_RIGHT:
pack_kernel = &device_pack_right_buffer;
kernel_info = kernel_info_map.at("device_pack_right_buffer");
break;
case CHUNK_BOTTOM:
pack_kernel = &device_pack_bottom_buffer;
kernel_info = kernel_info_map.at("device_pack_bottom_buffer");
break;
case CHUNK_TOP:
pack_kernel = &device_pack_top_buffer;
kernel_info = kernel_info_map.at("device_pack_top_buffer");
break;
default:
DIE("Invalid face identifier %d passed to pack\n", face);
}
}
else
{
switch (face)
{
case CHUNK_LEFT:
pack_kernel = &device_unpack_left_buffer;
kernel_info = kernel_info_map.at("device_unpack_left_buffer");
break;
case CHUNK_RIGHT:
pack_kernel = &device_unpack_right_buffer;
kernel_info = kernel_info_map.at("device_unpack_right_buffer");
break;
case CHUNK_BOTTOM:
pack_kernel = &device_unpack_bottom_buffer;
kernel_info = kernel_info_map.at("device_unpack_bottom_buffer");
break;
case CHUNK_TOP:
pack_kernel = &device_unpack_top_buffer;
kernel_info = kernel_info_map.at("device_unpack_top_buffer");
break;
default:
DIE("Invalid face identifier %d passed to unpack\n", face);
}
}
kernel_info.x_offset = halo_exchange_depth - depth;
kernel_info.y_offset = halo_exchange_depth - depth;
// size of this buffer
int side_size = 0;
// launch sizes for packing/unpacking arrays
dim3 pack_num_blocks, pack_block_size;
switch (face)
{
// pad to a multiple of the 32-wide local work group size (the warp size on NVIDIA hardware)
case CHUNK_LEFT:
case CHUNK_RIGHT:
side_size = depth*(y_max + 2*depth);
pack_num_blocks = update_lr_num_blocks[depth];
pack_block_size = update_lr_block_sizes[depth];
break;
case CHUNK_BOTTOM:
case CHUNK_TOP:
side_size = depth*(x_max + 2*depth);
pack_num_blocks = update_bt_num_blocks[depth];
pack_block_size = update_bt_block_sizes[depth];
break;
default:
DIE("Invalid face identifier %d passed to mpi buffer packing\n", face);
}
side_size *= sizeof(double);
if (!pack)
{
hipMemcpy(device_buffer, host_buffer, n_exchanged*side_size,
hipMemcpyHostToDevice);
CUDA_ERR_CHECK;
}
for (int ii = 0; ii < NUM_FIELDS; ii++)
{
int which_field = ii+1;
if (fields[ii])
{
if (offsets[ii] < 0 || offsets[ii] > NUM_FIELDS*side_size)
{
DIE("Tried to pack/unpack field %d but invalid offset %d given\n",
ii, offsets[ii]);
}
int x_inc = 0, y_inc = 0;
// x_inc / y_inc set to 0 in tea leaf
#define CASE_BUF(which_array) \
case FIELD_##which_array: \
{ \
device_array = which_array; \
}
double * device_array = NULL;
switch (which_field)
{
CASE_BUF(density); break;
CASE_BUF(energy0); break;
CASE_BUF(energy1); break;
CASE_BUF(u); break;
CASE_BUF(vector_p); break;
CASE_BUF(vector_sd); break;
CASE_BUF(vector_r); break;
default:
DIE("Invalid face %d passed to pack buffer function\n", which_field);
}
#undef CASE_BUF
hipLaunchKernelGGL(( pack_kernel), dim3(pack_num_blocks), dim3(pack_block_size) , 0, 0, kernel_info,
x_inc, y_inc, device_array, device_buffer, depth, offsets[ii]);
CUDA_ERR_CHECK;
}
}
if (pack)
{
hipMemcpy(host_buffer, device_buffer, n_exchanged*side_size,
hipMemcpyDeviceToHost);
CUDA_ERR_CHECK;
}
}
| cb16cd9a2eae4f711bde9725608fbd9070bf3a62.cu | #include "cuda_common.hpp"
#include <cstdio>
#include <numeric>
#include "kernel_files/pack_kernel.cuknl"
extern "C" void cuda_pack_buffers_
(int fields[NUM_FIELDS], int offsets[NUM_FIELDS], int * depth,
int * face, double * host_buffer)
{
cuda_chunk.packUnpackAllBuffers(fields, offsets, *depth, *face, 1, host_buffer);
}
extern "C" void cuda_unpack_buffers_
(int fields[NUM_FIELDS], int offsets[NUM_FIELDS], int * depth,
int * face, double * host_buffer)
{
cuda_chunk.packUnpackAllBuffers(fields, offsets, *depth, *face, 0, host_buffer);
}
void CloverleafCudaChunk::packUnpackAllBuffers
(int fields[NUM_FIELDS], int offsets[NUM_FIELDS],
const int depth, const int face, const int pack,
double * host_buffer)
{
const int n_exchanged = std::accumulate(fields, fields + NUM_FIELDS, 0);
if (n_exchanged < 1)
{
return;
}
// which buffer is being used for this operation
double * device_buffer = NULL;
switch (face)
{
case CHUNK_LEFT:
device_buffer = left_buffer;
break;
case CHUNK_RIGHT:
device_buffer = right_buffer;
break;
case CHUNK_BOTTOM:
device_buffer = bottom_buffer;
break;
case CHUNK_TOP:
device_buffer = top_buffer;
break;
default:
DIE("Invalid face identifier %d passed to mpi buffer packing\n", face);
}
pack_func_t pack_kernel = NULL;
kernel_info_t kernel_info;
// set which kernel to call
if (pack)
{
switch (face)
{
case CHUNK_LEFT:
pack_kernel = &device_pack_left_buffer;
kernel_info = kernel_info_map.at("device_pack_left_buffer");
break;
case CHUNK_RIGHT:
pack_kernel = &device_pack_right_buffer;
kernel_info = kernel_info_map.at("device_pack_right_buffer");
break;
case CHUNK_BOTTOM:
pack_kernel = &device_pack_bottom_buffer;
kernel_info = kernel_info_map.at("device_pack_bottom_buffer");
break;
case CHUNK_TOP:
pack_kernel = &device_pack_top_buffer;
kernel_info = kernel_info_map.at("device_pack_top_buffer");
break;
default:
DIE("Invalid face identifier %d passed to pack\n", face);
}
}
else
{
switch (face)
{
case CHUNK_LEFT:
pack_kernel = &device_unpack_left_buffer;
kernel_info = kernel_info_map.at("device_unpack_left_buffer");
break;
case CHUNK_RIGHT:
pack_kernel = &device_unpack_right_buffer;
kernel_info = kernel_info_map.at("device_unpack_right_buffer");
break;
case CHUNK_BOTTOM:
pack_kernel = &device_unpack_bottom_buffer;
kernel_info = kernel_info_map.at("device_unpack_bottom_buffer");
break;
case CHUNK_TOP:
pack_kernel = &device_unpack_top_buffer;
kernel_info = kernel_info_map.at("device_unpack_top_buffer");
break;
default:
DIE("Invalid face identifier %d passed to unpack\n", face);
}
}
kernel_info.x_offset = halo_exchange_depth - depth;
kernel_info.y_offset = halo_exchange_depth - depth;
// size of this buffer
int side_size = 0;
// launch sizes for packing/unpacking arrays
dim3 pack_num_blocks, pack_block_size;
switch (face)
{
// pad to a multiple of the 32-wide local work group size (the warp size on NVIDIA hardware)
case CHUNK_LEFT:
case CHUNK_RIGHT:
side_size = depth*(y_max + 2*depth);
pack_num_blocks = update_lr_num_blocks[depth];
pack_block_size = update_lr_block_sizes[depth];
break;
case CHUNK_BOTTOM:
case CHUNK_TOP:
side_size = depth*(x_max + 2*depth);
pack_num_blocks = update_bt_num_blocks[depth];
pack_block_size = update_bt_block_sizes[depth];
break;
default:
DIE("Invalid face identifier %d passed to mpi buffer packing\n", face);
}
side_size *= sizeof(double);
if (!pack)
{
cudaMemcpy(device_buffer, host_buffer, n_exchanged*side_size,
cudaMemcpyHostToDevice);
CUDA_ERR_CHECK;
}
for (int ii = 0; ii < NUM_FIELDS; ii++)
{
int which_field = ii+1;
if (fields[ii])
{
if (offsets[ii] < 0 || offsets[ii] > NUM_FIELDS*side_size)
{
DIE("Tried to pack/unpack field %d but invalid offset %d given\n",
ii, offsets[ii]);
}
int x_inc = 0, y_inc = 0;
// x_inc / y_inc set to 0 in tea leaf
#define CASE_BUF(which_array) \
case FIELD_##which_array: \
{ \
device_array = which_array; \
}
double * device_array = NULL;
switch (which_field)
{
CASE_BUF(density); break;
CASE_BUF(energy0); break;
CASE_BUF(energy1); break;
CASE_BUF(u); break;
CASE_BUF(vector_p); break;
CASE_BUF(vector_sd); break;
CASE_BUF(vector_r); break;
default:
DIE("Invalid face %d passed to pack buffer function\n", which_field);
}
#undef CASE_BUF
pack_kernel<<< pack_num_blocks, pack_block_size >>>(kernel_info,
x_inc, y_inc, device_array, device_buffer, depth, offsets[ii]);
CUDA_ERR_CHECK;
}
}
if (pack)
{
cudaMemcpy(host_buffer, device_buffer, n_exchanged*side_size,
cudaMemcpyDeviceToHost);
CUDA_ERR_CHECK;
}
}
|
9304a5670421462dce940a0579789c07696a2b2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../include/helper_cuda.h"
#include "../cuGMP.h"
__global__ void shiftLeftKernel(__dev_mpz_struct *res, __dev_mpz_struct *a, unsigned int shift_limbs, unsigned int shift_bits, unsigned int source_limbs)
{
unsigned int i = THREAD_ID;
if (i <= source_limbs)
{
if (i < shift_limbs)
{
res->_mp_d[i] = 0;
}
else if (i == shift_limbs)
{
res->_mp_d[i] = (a->_mp_d[i - shift_limbs] << shift_bits);
}
else if (i < source_limbs)
{
res->_mp_d[i] = (a->_mp_d[i - shift_limbs] << shift_bits) + (a->_mp_d[i - shift_limbs - 1] >> (GMP_LIMB_BITS - shift_bits));
}
else if (i == source_limbs)
{
res->_mp_d[i] = (a->_mp_d[i - shift_limbs - 1] >> (GMP_LIMB_BITS - shift_bits));
if (res->_mp_d[i] > 0)
{
res->_mp_size = i + 1;
}
}
}
}
void mpz_mul_2exp(mpz_ptr res, mpz_ptr a, unsigned long int exponent)
{
unsigned int source_limbs = ABS(a->_mp_size) + exponent / GMP_LIMB_BITS;
unsigned int result_limbs = source_limbs + 1;
allocate_memory(res, result_limbs, source_limbs);
unsigned int shift_limbs = exponent / GMP_LIMB_BITS;
unsigned int shift_bits = exponent % GMP_LIMB_BITS;
#ifdef KERNEL_PRINT
printf("shiftLeftKernel <<<%d, %d>>>\n", result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE);
#endif
shiftLeftKernel << <result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE >> >
(res->_dev_mp_struct, a->_dev_mp_struct, shift_limbs, shift_bits, source_limbs);
getLastCudaError("Kernel execution failed: [ shiftLeftKernel ]");
#ifdef EXPLICIT_SYNCHRONIZATION
checkCudaErrors(hipDeviceSynchronize());
#endif
// mp_size could have changed on the device - reflect on the host.
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyDeviceToHost);
// If the number shifted is negative, reflect it in the result.
if (a->_mp_size < 0)
{
res->_mp_size = -1 * ABS(res->_mp_size);
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyHostToDevice);
}
}
| 9304a5670421462dce940a0579789c07696a2b2c.cu | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../include/helper_cuda.h"
#include "../cuGMP.h"
__global__ void shiftLeftKernel(__dev_mpz_struct *res, __dev_mpz_struct *a, unsigned int shift_limbs, unsigned int shift_bits, unsigned int source_limbs)
{
unsigned int i = THREAD_ID;
if (i <= source_limbs)
{
if (i < shift_limbs)
{
res->_mp_d[i] = 0;
}
else if (i == shift_limbs)
{
res->_mp_d[i] = (a->_mp_d[i - shift_limbs] << shift_bits);
}
else if (i < source_limbs)
{
res->_mp_d[i] = (a->_mp_d[i - shift_limbs] << shift_bits) + (a->_mp_d[i - shift_limbs - 1] >> (GMP_LIMB_BITS - shift_bits));
}
else if (i == source_limbs)
{
res->_mp_d[i] = (a->_mp_d[i - shift_limbs - 1] >> (GMP_LIMB_BITS - shift_bits));
if (res->_mp_d[i] > 0)
{
res->_mp_size = i + 1;
}
}
}
}
void mpz_mul_2exp(mpz_ptr res, mpz_ptr a, unsigned long int exponent)
{
unsigned int source_limbs = ABS(a->_mp_size) + exponent / GMP_LIMB_BITS;
unsigned int result_limbs = source_limbs + 1;
allocate_memory(res, result_limbs, source_limbs);
unsigned int shift_limbs = exponent / GMP_LIMB_BITS;
unsigned int shift_bits = exponent % GMP_LIMB_BITS;
#ifdef KERNEL_PRINT
printf("shiftLeftKernel <<<%d, %d>>>\n", result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE);
#endif
shiftLeftKernel << <result_limbs / BLOCK_SIZE + 1, BLOCK_SIZE >> >
(res->_dev_mp_struct, a->_dev_mp_struct, shift_limbs, shift_bits, source_limbs);
getLastCudaError("Kernel execution failed: [ shiftLeftKernel ]");
#ifdef EXPLICIT_SYNCHRONIZATION
checkCudaErrors(cudaDeviceSynchronize());
#endif
// mp_size could have changed on the device - reflect on the host.
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyDeviceToHost);
// If the number shifted is negative, reflect it in the result.
if (a->_mp_size < 0)
{
res->_mp_size = -1 * ABS(res->_mp_size);
copy_operand_data_without_limbs(res, MemcpyDirection::memcpyHostToDevice);
}
}
|
8bc3e5a7279666f078c10ac339b7234a551347cb.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#include <hipsparse.h>
#include <hip/hip_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
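// s_addmm_out_sparse_dense_cuda_worker computes r_ = beta * t + alpha * (sparse @ dense)
// for a coalesced 2-D sparse operand: it converts the COO row indices into a CSR row
// pointer, narrows the column indices to int32 for the csrmm2 backend, scales or copies
// t into r_ according to beta, makes sure both r_ and dense are handed to csrmm2 in a
// column-major-compatible layout (transposing or taking a contiguous copy when needed),
// and finally copies the column-major result back into r_.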
template <typename scalar_t>
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, Scalar beta, const Tensor& t, Scalar alpha, LongTensor& indices, Tensor& values, const Tensor& dense) {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
Tensor r__;
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
r_.copy_(r__);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
s_addmm_out_sparse_dense_cuda_worker<scalar_t>(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
}
);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
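// Approach used below: the row index of every non-zero is saved, then overwritten with
// 0, 1, ..., nnz-1 so that the sparse-dense addmm above produces one dense output row
// per non-zero. The result is returned as a hybrid tensor with one sparse dimension
// (holding the saved row indices) and an nnz x n dense value block.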
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
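// Two paths below: when the sparse operand is coalesced, its values are scattered into
// the dense result with a custom element-wise kernel (a scalar flavour and one for
// dense_dim > 0); otherwise the sparse indices are flattened to 1-D and the update goes
// through index_add_, which tolerates duplicate indices. The arithmetic happens in the
// promoted common dtype and is cast back to the output dtype at the end.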
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
LongTensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
THCudaCheck(hipGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value);
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
LongTensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
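// Both operands are coalesced first, so their index lists are sorted and duplicate-free.
// One kernel then walks the two lists and multiplies values only where an index occurs
// in both (the intersection); a second, single-thread kernel compacts the matching
// indices and writes the resulting nnz, which is copied back to the host to narrow the
// output before marking it coalesced.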
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
LongTensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
LongTensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(hipGetLastError());
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(hipGetLastError());
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
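// Rough sketch of the kernel below: one thread per input non-zero i. Given
// input_indices_pos[i] = lower_bound(grad_indices_1D, input_indices_1D[i]) = j
// (computed on the host with thrust), the thread copies grad value row j into
// grad_input value row i when the flattened indices match, and zero-fills that
// row otherwise. E.g. grad_indices_1D = [0, 2], input_indices_1D = [0, 1, 2]
// yields grad_input value rows [grad row for index 0, zeros, grad row for index 2].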
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, deterministic);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
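// Example: for indices_1D = [0, 0, 1, 1, 1, 3] and num_matrices = 4 the kernel
// above writes mat_el_end_indices = [1, 4, -1, 5]: the position of the last
// element belonging to each matrix, or -1 for a matrix with no non-zero elements.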
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const LongTensor& indices_1D) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
hipLaunchKernelGGL(( search_end_matrix_indices_cuda_kernel), dim3(grid_size), dim3(block_size), 0, stream,
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
hipDeviceSynchronize();
}
hipDataType getTensorCudaDataType(Tensor self) {
hipDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = HIP_R_32F;
break;
case ScalarType::Double:
cuda_data_type = HIP_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2) {
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor& _bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2, bool deterministic) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = coalesce_sparse_cuda(self);
int64_t nnz = self_coalesced._nnz();
LongTensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
LongTensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since hipsparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
int64_t* mat_el_end_indices_device;
hipMalloc(&mat_el_end_indices_device, num_matrices*sizeof(int64_t));
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
hipMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
hipMemcpyDeviceToHost
);
hipFree(mat_el_end_indices_device);
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
// See Note [Enabling Deterministic Operations]
deterministic = deterministic || globalContext().deterministic();
hipsparseSpMMAlg_t mm_alg = deterministic ? HIPSPARSE_COOMM_ALG2 : HIPSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
hipsparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
hipsparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
hipsparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
hipMallocManaged(&workspace_buffer, workspace_buffer_size);
}
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
| 8bc3e5a7279666f078c10ac339b7234a551347cb.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#include <cusparse.h>
#include <cuda_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
template <typename scalar_t>
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, Scalar beta, const Tensor& t, Scalar alpha, LongTensor& indices, Tensor& values, const Tensor& dense) {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
Tensor r__;
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
r_.copy_(r__);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
s_addmm_out_sparse_dense_cuda_worker<scalar_t>(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
}
);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
LongTensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
THCudaCheck(cudaGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value);
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
LongTensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
LongTensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
LongTensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(cudaGetLastError());
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(cudaGetLastError());
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
_sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, deterministic);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const LongTensor& indices_1D) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
search_end_matrix_indices_cuda_kernel<<<grid_size, block_size, 0, stream>>>(
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
cudaDeviceSynchronize();
}
cudaDataType getTensorCudaDataType(Tensor self) {
cudaDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = CUDA_R_32F;
break;
case ScalarType::Double:
cuda_data_type = CUDA_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2) {
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor& _bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2, bool deterministic) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for cusparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = coalesce_sparse_cuda(self);
int64_t nnz = self_coalesced._nnz();
LongTensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
LongTensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since cusparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
int64_t* mat_el_end_indices_device;
cudaMalloc(&mat_el_end_indices_device, num_matrices*sizeof(int64_t));
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
cudaMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
cudaMemcpyDeviceToHost
);
cudaFree(mat_el_end_indices_device);
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
// See Note [Enabling Deterministic Operations]
deterministic = deterministic || globalContext().deterministic();
cusparseSpMMAlg_t mm_alg = deterministic ? CUSPARSE_COOMM_ALG2 : CUSPARSE_COOMM_ALG1;
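// CUSPARSE_COOMM_ALG2 is the reproducible path (fixed reduction order across
// runs); ALG1 is typically faster but may use atomics, so its floating-point
// summation order, and hence the result, can vary between runs.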
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
cusparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
cusparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
cusparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
cudaMallocManaged(&workspace_buffer, workspace_buffer_size);
}
TORCH_CUDASPARSE_CHECK(cusparseSpMM(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
|
f81b50cb433afddb18dea93c9b7e36707283cf95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
////////////////////////////////////////////////////////////////////////////////
/*
Hologram generating algorithms for CUDA Devices
Copyright 2009, 2010, 2011, 2012 Martin Persson
[email protected]
This file is part of GenerateHologramCUDA.
GenerateHologramCUDA is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GenerateHologramCUDA is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////////////////////////////
//The function "GenerateHologram" contains two different algorithms for
//hologram generation. The last parameter in the function call selects which
//one to use:
//0: Complex addition of "Lenses and Prisms", no optimization (3D)
//1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D)
//2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D)
//-(0) produces optimal holograms for 1 or 2 traps and is significantly faster.
// (0) is automatically selected if the number of spots is < 3.
////////////////////////////////////////////////////////////////////////////////
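//For reference, a rough sketch of what (0) computes (the exact prefactors are
//not taken from this file and depend on wavelength, focal length and SLM pitch):
//each trap m at (x_m, y_m, z_m) contributes a prism + lens phase of roughly
// phi_m(x,y) ~ (2*pi/(lambda*f))*(x*x_m + y*y_m) + (pi*z_m/(lambda*f*f))*(x*x + y*y)
//and the displayed hologram phase is the argument of the complex sum over traps,
// arg( sum_m a_m*exp(i*phi_m(x,y)) ).
////////////////////////////////////////////////////////////////////////////////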
//Fresnel propagation based algorithm (1) described in:
//Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco
//"Computer generation of optimal holograms for optical trap arrays"
//Opt. Express 15, 1913-1922 (2007)
//
//The original algorithm has been modified to allow variable spot amplitudes
////////////////////////////////////////////////////////////////////////////////
//Naming convention for variables:
//-The prefix indicates where data is located
//--In host functions: h = host memory
// d = device memory
// c = constant memory
//--In global functions: g = global memory
// s = shared memory
// c = constant memory
// no prefix = registers
//-The suffix indicates the data type, no suffix usually indicates an integer
////////////////////////////////////////////////////////////////////////////////
//Possible improvements:
//-Improve convergence of the GS algorithms for 2 spots. *done
//-Compensate spot intensities for distance from center of field. *done
//-Put all arguments for device functions and trap positions in constant memory. *done
// (Requires all functions to be moved into the same file or the use of some
// workaround found on nVidia forum)
//-Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices)
//-Use "zero-copy" to transfer pSLM to host.
//-Rename functions and variables for consistency and readability
//-Allow variable spot phases for Lenses and Prisms
////////////////////////////////////////////////////////////////////////////////
//#define M_CUDA_DEBUG //activates a number of custom debug macros//
float dt_milliseconds;
hipEvent_t start, stop;
////////////////////////////////////////////////////////////////////////////////
//Includes
////////////////////////////////////////////////////////////////////////////////
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#define MAX_SPOTS 1024 //decrease this if your GPU keeps running out of memory
#define BLOCK_SIZE 256 //should be a power of 2
#define SLM_SIZE 512
#if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048))
#define SLMPOW2 //Uses bitwise modulo operations if the SLM size is a power of 2
#endif
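//With a power-of-two width, x % N == x & (N-1) and x / N == x >> log2(N);
//e.g. for SLM_SIZE 512, "index & 511" extracts the column and
//"(index - X_int) >> 9" the row. getXint()/getYint() below rely on this
//whenever SLMPOW2 is defined.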
////////////////////////////////////////////////////////////////////////////////
// forward declarations
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//Set correction parameters
////////////////////////////////////////////////////////////////////////////////
__device__ unsigned char applyPolLUT(float phase2pi, float X, float Y, float *s_c)
{
switch (c_N_PolLUTCoeff[0]) {
case 120:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi + s_c[35]*X*X*X*X*X + s_c[36]*X*X*X*X*Y + s_c[37]*X*X*X*X*phase2pi + s_c[38]*X*X*X*Y*Y + s_c[39]*X*X*X*Y*phase2pi + s_c[40]*X*X*X*phase2pi*phase2pi + s_c[41]*X*X*Y*Y*Y + s_c[42]*X*X*Y*Y*phase2pi + s_c[43]*X*X*Y*phase2pi*phase2pi + s_c[44]*X*X*phase2pi*phase2pi*phase2pi + s_c[45]*X*Y*Y*Y*Y + s_c[46]*X*Y*Y*Y*phase2pi + s_c[47]*X*Y*Y*phase2pi*phase2pi + s_c[48]*X*Y*phase2pi*phase2pi*phase2pi + s_c[49]*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[50]*Y*Y*Y*Y*Y + s_c[51]*Y*Y*Y*Y*phase2pi + s_c[52]*Y*Y*Y*phase2pi*phase2pi + s_c[53]*Y*Y*phase2pi*phase2pi*phase2pi + s_c[54]*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[55]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[56]*X*X*X*X*X*X + s_c[57]*X*X*X*X*X*Y + s_c[58]*X*X*X*X*X*phase2pi + s_c[59]*X*X*X*X*Y*Y + s_c[60]*X*X*X*X*Y*phase2pi + s_c[61]*X*X*X*X*phase2pi*phase2pi + s_c[62]*X*X*X*Y*Y*Y + s_c[63]*X*X*X*Y*Y*phase2pi + s_c[64]*X*X*X*Y*phase2pi*phase2pi + s_c[65]*X*X*X*phase2pi*phase2pi*phase2pi + s_c[66]*X*X*Y*Y*Y*Y + s_c[67]*X*X*Y*Y*Y*phase2pi + s_c[68]*X*X*Y*Y*phase2pi*phase2pi + s_c[69]*X*X*Y*phase2pi*phase2pi*phase2pi + s_c[70]*X*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[71]*X*Y*Y*Y*Y*Y + s_c[72]*X*Y*Y*Y*Y*phase2pi + s_c[73]*X*Y*Y*Y*phase2pi*phase2pi + s_c[74]*X*Y*Y*phase2pi*phase2pi*phase2pi + s_c[75]*X*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[76]*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[77]*Y*Y*Y*Y*Y*Y + s_c[78]*Y*Y*Y*Y*Y*phase2pi + s_c[79]*Y*Y*Y*Y*phase2pi*phase2pi + s_c[80]*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[81]*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[82]*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[83]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[84]*X*X*X*X*X*X*X + s_c[85]*X*X*X*X*X*X*Y + s_c[86]*X*X*X*X*X*X*phase2pi + s_c[87]*X*X*X*X*X*Y*Y + s_c[88]*X*X*X*X*X*Y*phase2pi + s_c[89]*X*X*X*X*X*phase2pi*phase2pi + s_c[90]*X*X*X*X*Y*Y*Y + s_c[91]*X*X*X*X*Y*Y*phase2pi + s_c[92]*X*X*X*X*Y*phase2pi*phase2pi + s_c[93]*X*X*X*X*phase2pi*phase2pi*phase2pi + s_c[94]*X*X*X*Y*Y*Y*Y + s_c[95]*X*X*X*Y*Y*Y*phase2pi + s_c[96]*X*X*X*Y*Y*phase2pi*phase2pi + s_c[97]*X*X*X*Y*phase2pi*phase2pi*phase2pi + s_c[98]*X*X*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[99]*X*X*Y*Y*Y*Y*Y + s_c[100]*X*X*Y*Y*Y*Y*phase2pi + s_c[101]*X*X*Y*Y*Y*phase2pi*phase2pi + s_c[102]*X*X*Y*Y*phase2pi*phase2pi*phase2pi + s_c[103]*X*X*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[104]*X*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[105]*X*Y*Y*Y*Y*Y*Y + s_c[106]*X*Y*Y*Y*Y*Y*phase2pi + s_c[107]*X*Y*Y*Y*Y*phase2pi*phase2pi + s_c[108]*X*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[109]*X*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[110]*X*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + 
s_c[111]*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[112]*Y*Y*Y*Y*Y*Y*Y + s_c[113]*Y*Y*Y*Y*Y*Y*phase2pi + s_c[114]*Y*Y*Y*Y*Y*phase2pi*phase2pi + s_c[115]*Y*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[116]*Y*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[117]*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[118]*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[119]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi);
case 84:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi + s_c[35]*X*X*X*X*X + s_c[36]*X*X*X*X*Y + s_c[37]*X*X*X*X*phase2pi + s_c[38]*X*X*X*Y*Y + s_c[39]*X*X*X*Y*phase2pi + s_c[40]*X*X*X*phase2pi*phase2pi + s_c[41]*X*X*Y*Y*Y + s_c[42]*X*X*Y*Y*phase2pi + s_c[43]*X*X*Y*phase2pi*phase2pi + s_c[44]*X*X*phase2pi*phase2pi*phase2pi + s_c[45]*X*Y*Y*Y*Y + s_c[46]*X*Y*Y*Y*phase2pi + s_c[47]*X*Y*Y*phase2pi*phase2pi + s_c[48]*X*Y*phase2pi*phase2pi*phase2pi + s_c[49]*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[50]*Y*Y*Y*Y*Y + s_c[51]*Y*Y*Y*Y*phase2pi + s_c[52]*Y*Y*Y*phase2pi*phase2pi + s_c[53]*Y*Y*phase2pi*phase2pi*phase2pi + s_c[54]*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[55]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[56]*X*X*X*X*X*X + s_c[57]*X*X*X*X*X*Y + s_c[58]*X*X*X*X*X*phase2pi + s_c[59]*X*X*X*X*Y*Y + s_c[60]*X*X*X*X*Y*phase2pi + s_c[61]*X*X*X*X*phase2pi*phase2pi + s_c[62]*X*X*X*Y*Y*Y + s_c[63]*X*X*X*Y*Y*phase2pi + s_c[64]*X*X*X*Y*phase2pi*phase2pi + s_c[65]*X*X*X*phase2pi*phase2pi*phase2pi + s_c[66]*X*X*Y*Y*Y*Y + s_c[67]*X*X*Y*Y*Y*phase2pi + s_c[68]*X*X*Y*Y*phase2pi*phase2pi + s_c[69]*X*X*Y*phase2pi*phase2pi*phase2pi + s_c[70]*X*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[71]*X*Y*Y*Y*Y*Y + s_c[72]*X*Y*Y*Y*Y*phase2pi + s_c[73]*X*Y*Y*Y*phase2pi*phase2pi + s_c[74]*X*Y*Y*phase2pi*phase2pi*phase2pi + s_c[75]*X*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[76]*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[77]*Y*Y*Y*Y*Y*Y + s_c[78]*Y*Y*Y*Y*Y*phase2pi + s_c[79]*Y*Y*Y*Y*phase2pi*phase2pi + s_c[80]*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[81]*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[82]*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[83]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi);
case 56:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi + s_c[35]*X*X*X*X*X + s_c[36]*X*X*X*X*Y + s_c[37]*X*X*X*X*phase2pi + s_c[38]*X*X*X*Y*Y + s_c[39]*X*X*X*Y*phase2pi + s_c[40]*X*X*X*phase2pi*phase2pi + s_c[41]*X*X*Y*Y*Y + s_c[42]*X*X*Y*Y*phase2pi + s_c[43]*X*X*Y*phase2pi*phase2pi + s_c[44]*X*X*phase2pi*phase2pi*phase2pi + s_c[45]*X*Y*Y*Y*Y + s_c[46]*X*Y*Y*Y*phase2pi + s_c[47]*X*Y*Y*phase2pi*phase2pi + s_c[48]*X*Y*phase2pi*phase2pi*phase2pi + s_c[49]*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[50]*Y*Y*Y*Y*Y + s_c[51]*Y*Y*Y*Y*phase2pi + s_c[52]*Y*Y*Y*phase2pi*phase2pi + s_c[53]*Y*Y*phase2pi*phase2pi*phase2pi + s_c[54]*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[55]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi);
case 35:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi);
case 20:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi);
default:
return 0;
}
}
__device__ int getYint(int index, int X_int)
{
#ifdef SLMPOW2
int Y_int = (index-X_int)>>c_log2data_w[0];
#else
int Y_int = (float)(floor((float)index/c_data_w_f[0]));
#endif
return Y_int;
}
__device__ int getXint(int index)
{
#ifdef SLMPOW2
int X_int = index&(c_data_w[0]-1);
#else
float X_int= index%c_data_w[0];
#endif
return X_int;
}
__device__ float ApplyAberrationCorrection(float pSpot, float correction)
{
pSpot = pSpot - correction; //apply correction
return (pSpot - (2.0f*M_PI) * floor((pSpot+M_PI) / (2.0f*M_PI))); //apply mod([-pi, pi], pSpot)
}
__device__ int phase2int32(float phase2pi)
{
return (int)floor((phase2pi + M_PI)*256.0f / (2.0f * M_PI));
}
__device__ unsigned char phase2uc(float phase2pi)
{
return (unsigned char)floor((phase2pi + M_PI)*256.0f / (2.0f * M_PI));
}
__device__ float uc2phase(float uc)
{
return (float)uc*2.0f*M_PI/256.0f - M_PI;
}
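//Worked example of the 8-bit phase mapping above (inputs assumed in [-pi, pi)):
//phase2uc(0.0f) = floor((0 + pi)*256/(2*pi)) = 128 and
//uc2phase(128) = 128*2*pi/256 - pi = 0, so the full 2*pi range is quantized
//to 256 grey levels with a step of 2*pi/256 (about 0.0245 rad).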
__global__ void ApplyCorrections(unsigned char *g_pSLM_uc, unsigned char *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float pSLM2pi_f = uc2phase(g_pSLM_uc[idx]);
if (c_useAberrationCorr_b[0])
pSLM2pi_f = ApplyAberrationCorrection(pSLM2pi_f, g_AberrationCorr_f[idx]);
if (c_usePolLUT_b[0])
{
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
__shared__ float s_LUTcoeff[120];
if (tid < c_N_PolLUTCoeff[0])
s_LUTcoeff[tid] = g_LUTPolCoeff_f[tid];
__syncthreads();
g_pSLM_uc[idx] = applyPolLUT(pSLM2pi_f, X, Y, s_LUTcoeff);
}
else if (c_applyLUT_b[0])
{
__shared__ unsigned char s_LUT[256];
if (tid < 256)
s_LUT[tid] = g_LUT[tid];
__syncthreads();
g_pSLM_uc[idx] = s_LUT[phase2int32(pSLM2pi_f)];
}
else
g_pSLM_uc[idx] = phase2uc(pSLM2pi_f);
} | f81b50cb433afddb18dea93c9b7e36707283cf95.cu | #include "includes.h"
////////////////////////////////////////////////////////////////////////////////
/*
Hologram generating algorithms for CUDA Devices
Copyright 2009, 2010, 2011, 2012 Martin Persson
[email protected]
This file is part of GenerateHologramCUDA.
GenerateHologramCUDA is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GenerateHologramCUDA is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////////////////////////////
//The function "GenerateHologram" contains two different algorithms for
//hologram generation. The last parameter in the function call selects which
//one to use:
//0: Complex addition of "Lenses and Prisms", no optimization (3D)
//1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D)
//2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D)
//-(0) produces optimal holograms for 1 or 2 traps and is significantly faster.
// (0) is automatically selected if the number of spots is < 3.
////////////////////////////////////////////////////////////////////////////////
//Fresnel propagation based algorithm (1) described in:
//Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco
//"Computer generation of optimal holograms for optical trap arrays"
//Opt. Express 15, 1913-1922 (2007)
//
//The original algorithm has been modified to allow variable spot amplitudes
////////////////////////////////////////////////////////////////////////////////
//Naming convention for variables:
//-The prefix indicates where data is located
//--In host functions: h = host memory
// d = device memory
// c = constant memory
//--In global functions: g = global memory
// s = shared memory
// c = constant memory
// no prefix = registers
//-The suffix indicates the data type, no suffix usually indicates an integer
////////////////////////////////////////////////////////////////////////////////
//Possible improvements:
//-Improve convergence of the GS algorithms for 2 spots. *done
//-Compensate spot intensities for distance from center of field. *done
//-Put all arguments for device functions and trap positions in constant memory. *done
// (Requires all functions to be moved into the same file or the use of some
// workaround found on nVidia forum)
//-Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices)
//-Use "zero-copy" to transfer pSLM to host.
//-Rename functions and variables for consistency and readability
//-Allow variable spot phases for Lenses and Prisms
////////////////////////////////////////////////////////////////////////////////
//#define M_CUDA_DEBUG //activates a number of custom debug macros//
float dt_milliseconds;
cudaEvent_t start, stop;
////////////////////////////////////////////////////////////////////////////////
//Includes
////////////////////////////////////////////////////////////////////////////////
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#define MAX_SPOTS 1024 //decrease this if your GPU keeps running out of memory
#define BLOCK_SIZE 256 //should be a power of 2
#define SLM_SIZE 512
#if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048))
#define SLMPOW2 //Uses bitwise modulo operations if the SLM size is a power of 2
#endif
////////////////////////////////////////////////////////////////////////////////
// forward declarations
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//Set correction parameters
////////////////////////////////////////////////////////////////////////////////
__device__ unsigned char applyPolLUT(float phase2pi, float X, float Y, float *s_c)
{
switch (c_N_PolLUTCoeff[0]) {
case 120:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi + s_c[35]*X*X*X*X*X + s_c[36]*X*X*X*X*Y + s_c[37]*X*X*X*X*phase2pi + s_c[38]*X*X*X*Y*Y + s_c[39]*X*X*X*Y*phase2pi + s_c[40]*X*X*X*phase2pi*phase2pi + s_c[41]*X*X*Y*Y*Y + s_c[42]*X*X*Y*Y*phase2pi + s_c[43]*X*X*Y*phase2pi*phase2pi + s_c[44]*X*X*phase2pi*phase2pi*phase2pi + s_c[45]*X*Y*Y*Y*Y + s_c[46]*X*Y*Y*Y*phase2pi + s_c[47]*X*Y*Y*phase2pi*phase2pi + s_c[48]*X*Y*phase2pi*phase2pi*phase2pi + s_c[49]*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[50]*Y*Y*Y*Y*Y + s_c[51]*Y*Y*Y*Y*phase2pi + s_c[52]*Y*Y*Y*phase2pi*phase2pi + s_c[53]*Y*Y*phase2pi*phase2pi*phase2pi + s_c[54]*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[55]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[56]*X*X*X*X*X*X + s_c[57]*X*X*X*X*X*Y + s_c[58]*X*X*X*X*X*phase2pi + s_c[59]*X*X*X*X*Y*Y + s_c[60]*X*X*X*X*Y*phase2pi + s_c[61]*X*X*X*X*phase2pi*phase2pi + s_c[62]*X*X*X*Y*Y*Y + s_c[63]*X*X*X*Y*Y*phase2pi + s_c[64]*X*X*X*Y*phase2pi*phase2pi + s_c[65]*X*X*X*phase2pi*phase2pi*phase2pi + s_c[66]*X*X*Y*Y*Y*Y + s_c[67]*X*X*Y*Y*Y*phase2pi + s_c[68]*X*X*Y*Y*phase2pi*phase2pi + s_c[69]*X*X*Y*phase2pi*phase2pi*phase2pi + s_c[70]*X*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[71]*X*Y*Y*Y*Y*Y + s_c[72]*X*Y*Y*Y*Y*phase2pi + s_c[73]*X*Y*Y*Y*phase2pi*phase2pi + s_c[74]*X*Y*Y*phase2pi*phase2pi*phase2pi + s_c[75]*X*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[76]*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[77]*Y*Y*Y*Y*Y*Y + s_c[78]*Y*Y*Y*Y*Y*phase2pi + s_c[79]*Y*Y*Y*Y*phase2pi*phase2pi + s_c[80]*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[81]*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[82]*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[83]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[84]*X*X*X*X*X*X*X + s_c[85]*X*X*X*X*X*X*Y + s_c[86]*X*X*X*X*X*X*phase2pi + s_c[87]*X*X*X*X*X*Y*Y + s_c[88]*X*X*X*X*X*Y*phase2pi + s_c[89]*X*X*X*X*X*phase2pi*phase2pi + s_c[90]*X*X*X*X*Y*Y*Y + s_c[91]*X*X*X*X*Y*Y*phase2pi + s_c[92]*X*X*X*X*Y*phase2pi*phase2pi + s_c[93]*X*X*X*X*phase2pi*phase2pi*phase2pi + s_c[94]*X*X*X*Y*Y*Y*Y + s_c[95]*X*X*X*Y*Y*Y*phase2pi + s_c[96]*X*X*X*Y*Y*phase2pi*phase2pi + s_c[97]*X*X*X*Y*phase2pi*phase2pi*phase2pi + s_c[98]*X*X*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[99]*X*X*Y*Y*Y*Y*Y + s_c[100]*X*X*Y*Y*Y*Y*phase2pi + s_c[101]*X*X*Y*Y*Y*phase2pi*phase2pi + s_c[102]*X*X*Y*Y*phase2pi*phase2pi*phase2pi + s_c[103]*X*X*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[104]*X*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[105]*X*Y*Y*Y*Y*Y*Y + s_c[106]*X*Y*Y*Y*Y*Y*phase2pi + s_c[107]*X*Y*Y*Y*Y*phase2pi*phase2pi + s_c[108]*X*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[109]*X*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[110]*X*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + 
s_c[111]*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[112]*Y*Y*Y*Y*Y*Y*Y + s_c[113]*Y*Y*Y*Y*Y*Y*phase2pi + s_c[114]*Y*Y*Y*Y*Y*phase2pi*phase2pi + s_c[115]*Y*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[116]*Y*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[117]*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[118]*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[119]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi);
case 84:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi + s_c[35]*X*X*X*X*X + s_c[36]*X*X*X*X*Y + s_c[37]*X*X*X*X*phase2pi + s_c[38]*X*X*X*Y*Y + s_c[39]*X*X*X*Y*phase2pi + s_c[40]*X*X*X*phase2pi*phase2pi + s_c[41]*X*X*Y*Y*Y + s_c[42]*X*X*Y*Y*phase2pi + s_c[43]*X*X*Y*phase2pi*phase2pi + s_c[44]*X*X*phase2pi*phase2pi*phase2pi + s_c[45]*X*Y*Y*Y*Y + s_c[46]*X*Y*Y*Y*phase2pi + s_c[47]*X*Y*Y*phase2pi*phase2pi + s_c[48]*X*Y*phase2pi*phase2pi*phase2pi + s_c[49]*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[50]*Y*Y*Y*Y*Y + s_c[51]*Y*Y*Y*Y*phase2pi + s_c[52]*Y*Y*Y*phase2pi*phase2pi + s_c[53]*Y*Y*phase2pi*phase2pi*phase2pi + s_c[54]*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[55]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[56]*X*X*X*X*X*X + s_c[57]*X*X*X*X*X*Y + s_c[58]*X*X*X*X*X*phase2pi + s_c[59]*X*X*X*X*Y*Y + s_c[60]*X*X*X*X*Y*phase2pi + s_c[61]*X*X*X*X*phase2pi*phase2pi + s_c[62]*X*X*X*Y*Y*Y + s_c[63]*X*X*X*Y*Y*phase2pi + s_c[64]*X*X*X*Y*phase2pi*phase2pi + s_c[65]*X*X*X*phase2pi*phase2pi*phase2pi + s_c[66]*X*X*Y*Y*Y*Y + s_c[67]*X*X*Y*Y*Y*phase2pi + s_c[68]*X*X*Y*Y*phase2pi*phase2pi + s_c[69]*X*X*Y*phase2pi*phase2pi*phase2pi + s_c[70]*X*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[71]*X*Y*Y*Y*Y*Y + s_c[72]*X*Y*Y*Y*Y*phase2pi + s_c[73]*X*Y*Y*Y*phase2pi*phase2pi + s_c[74]*X*Y*Y*phase2pi*phase2pi*phase2pi + s_c[75]*X*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[76]*X*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[77]*Y*Y*Y*Y*Y*Y + s_c[78]*Y*Y*Y*Y*Y*phase2pi + s_c[79]*Y*Y*Y*Y*phase2pi*phase2pi + s_c[80]*Y*Y*Y*phase2pi*phase2pi*phase2pi + s_c[81]*Y*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[82]*Y*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi + s_c[83]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi);
case 56:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi + s_c[35]*X*X*X*X*X + s_c[36]*X*X*X*X*Y + s_c[37]*X*X*X*X*phase2pi + s_c[38]*X*X*X*Y*Y + s_c[39]*X*X*X*Y*phase2pi + s_c[40]*X*X*X*phase2pi*phase2pi + s_c[41]*X*X*Y*Y*Y + s_c[42]*X*X*Y*Y*phase2pi + s_c[43]*X*X*Y*phase2pi*phase2pi + s_c[44]*X*X*phase2pi*phase2pi*phase2pi + s_c[45]*X*Y*Y*Y*Y + s_c[46]*X*Y*Y*Y*phase2pi + s_c[47]*X*Y*Y*phase2pi*phase2pi + s_c[48]*X*Y*phase2pi*phase2pi*phase2pi + s_c[49]*X*phase2pi*phase2pi*phase2pi*phase2pi + s_c[50]*Y*Y*Y*Y*Y + s_c[51]*Y*Y*Y*Y*phase2pi + s_c[52]*Y*Y*Y*phase2pi*phase2pi + s_c[53]*Y*Y*phase2pi*phase2pi*phase2pi + s_c[54]*Y*phase2pi*phase2pi*phase2pi*phase2pi + s_c[55]*phase2pi*phase2pi*phase2pi*phase2pi*phase2pi);
case 35:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi + s_c[20]*X*X*X*X + s_c[21]*X*X*X*Y + s_c[22]*X*X*X*phase2pi + s_c[23]*X*X*Y*Y + s_c[24]*X*X*Y*phase2pi + s_c[25]*X*X*phase2pi*phase2pi + s_c[26]*X*Y*Y*Y + s_c[27]*X*Y*Y*phase2pi + s_c[28]*X*Y*phase2pi*phase2pi + s_c[29]*X*phase2pi*phase2pi*phase2pi + s_c[30]*Y*Y*Y*Y + s_c[31]*Y*Y*Y*phase2pi + s_c[32]*Y*Y*phase2pi*phase2pi + s_c[33]*Y*phase2pi*phase2pi*phase2pi + s_c[34]*phase2pi*phase2pi*phase2pi*phase2pi);
case 20:
return (unsigned char)(s_c[0] + s_c[1]*X + s_c[2]*Y + s_c[3]*phase2pi + s_c[4]*X*X + s_c[5]*X*Y + s_c[6]*X*phase2pi + s_c[7]*Y*Y + s_c[8]*Y*phase2pi + s_c[9]*phase2pi*phase2pi + s_c[10]*X*X*X + s_c[11]*X*X*Y + s_c[12]*X*X*phase2pi + s_c[13]*X*Y*Y + s_c[14]*X*Y*phase2pi + s_c[15]*X*phase2pi*phase2pi + s_c[16]*Y*Y*Y + s_c[17]*Y*Y*phase2pi + s_c[18]*Y*phase2pi*phase2pi + s_c[19]*phase2pi*phase2pi*phase2pi);
default:
return 0;
}
}
__device__ int getYint(int index, int X_int)
{
#ifdef SLMPOW2
int Y_int = (index-X_int)>>c_log2data_w[0];
#else
int Y_int = (float)(floor((float)index/c_data_w_f[0]));
#endif
return Y_int;
}
__device__ int getXint(int index)
{
#ifdef SLMPOW2
int X_int = index&(c_data_w[0]-1);
#else
float X_int= index%c_data_w[0];
#endif
return X_int;
}
__device__ float ApplyAberrationCorrection(float pSpot, float correction)
{
pSpot = pSpot - correction; //apply correction
return (pSpot - (2.0f*M_PI) * floor((pSpot+M_PI) / (2.0f*M_PI))); //apply mod([-pi, pi], pSpot)
}
__device__ int phase2int32(float phase2pi)
{
return (int)floor((phase2pi + M_PI)*256.0f / (2.0f * M_PI));
}
__device__ unsigned char phase2uc(float phase2pi)
{
return (unsigned char)floor((phase2pi + M_PI)*256.0f / (2.0f * M_PI));
}
__device__ float uc2phase(float uc)
{
return (float)uc*2.0f*M_PI/256.0f - M_PI;
}
__global__ void ApplyCorrections(unsigned char *g_pSLM_uc, unsigned char *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float pSLM2pi_f = uc2phase(g_pSLM_uc[idx]);
if (c_useAberrationCorr_b[0])
pSLM2pi_f = ApplyAberrationCorrection(pSLM2pi_f, g_AberrationCorr_f[idx]);
if (c_usePolLUT_b[0])
{
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
__shared__ float s_LUTcoeff[120];
if (tid < c_N_PolLUTCoeff[0])
s_LUTcoeff[tid] = g_LUTPolCoeff_f[tid];
__syncthreads();
g_pSLM_uc[idx] = applyPolLUT(pSLM2pi_f, X, Y, s_LUTcoeff);
}
else if (c_applyLUT_b[0])
{
__shared__ unsigned char s_LUT[256];
if (tid < 256)
s_LUT[tid] = g_LUT[tid];
__syncthreads();
g_pSLM_uc[idx] = s_LUT[phase2int32(pSLM2pi_f)];
}
else
g_pSLM_uc[idx] = phase2uc(pSLM2pi_f);
} |
ff0e56878bfce0767cc2442ab473348992beeaba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef IUSPARSECOMPARE_CU
#define IUSPARSECOMPARE_CU
#include <list>
#include <iostream>
#include <iudefs.h>
#include <iucore.h>
#include <iucutil.h>
#include <iumath.h>
#include <iusparse.h>
// textures
texture<float, 2, hipReadModeElementType> rof_tex_u;
texture<float, 2, hipReadModeElementType> rof_tex_u_;
texture<float, 2, hipReadModeElementType> rof_tex_f;
texture<float2, 2, hipReadModeElementType> rof_tex_p;
texture<float2, 2, hipReadModeElementType> rof_tex_gradient;
texture<float, 2, hipReadModeElementType> rof_tex_divergence;
const unsigned int BSX=32;
const unsigned int BSY=16;
////////////////////////////////////////////////////////////////////////////////
void bindTexture(texture<float, 2>& tex, iu::ImageGpu_32f_C1* mem)
{
tex.addressMode[0] = hipAddressModeClamp; // Neumann Boundary Conditions
tex.addressMode[1] = hipAddressModeClamp; // Neumann Boundary Conditions
tex.filterMode = hipFilterModeLinear;
tex.normalized = false;
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>();
( hipBindTexture2D( 0, &tex, mem->data(), &channel_desc,
mem->width(), mem->height(), mem->pitch()));
}
////////////////////////////////////////////////////////////////////////////////
void bindTexture(texture<float2, 2>& tex, iu::ImageGpu_32f_C2* mem)
{
tex.addressMode[0] = hipAddressModeClamp; // Neumann Boundary Conditions
tex.addressMode[1] = hipAddressModeClamp; // Neumann Boundary Conditions
tex.filterMode = hipFilterModeLinear;
tex.normalized = false;
hipChannelFormatDesc channel_desc_float2 = hipCreateChannelDesc<float2>();
( hipBindTexture2D( 0, &tex, mem->data(), &channel_desc_float2,
mem->width(), mem->height(), mem->pitch()));
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOO STANDARD MODEL OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
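// The two kernels below perform one iteration of a primal-dual scheme for the
// ROF model min_u int |grad u| + (lambda/2)*int (u - f)^2 :
// dual: p <- (p + sigma*grad u_) / max(1, |p + sigma*grad u_|)
// primal: u <- (u + tau*(div p + lambda*f)) / (1 + tau*lambda)
// over-relaxation: u_ <- u + theta*(u - u_)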
__global__ void update_primal(float* device_u, float* device_u_,
float tau, float theta, float lambda,
int width, int height, int xstride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// texture fetches
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
float u_ = u;
float divergence = dp_ad(rof_tex_p, x, y, width, height);
// update primal variable
u = (u + tau*(divergence + lambda*f))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual(float2* device_p, float sigma,
int width, int height, int stride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width && y<height)
{
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f);
float2 grad_u = dp(rof_tex_u_, x, y);
// update dual variable
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
device_p[y*stride + x] = p;
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_u_, device_u_);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
hipLaunchKernelGGL(( update_dual), dim3(dimGrid), dim3(dimBlock), 0, 0, device_p->data(), sigma,
width, height, device_p->stride());
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
hipLaunchKernelGGL(( update_primal), dim3(dimGrid), dim3(dimBlock), 0, 0, device_u->data(), device_u_->data(),
tau, theta, lambda, width, height,
device_u->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
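// Note on the step sizes above: theta = 1/sqrt(1 + 0.7*lambda*tau) together
// with tau *= theta and sigma /= theta appears to follow the accelerated
// primal-dual update (strong-convexity constant of roughly 0.35*lambda); once
// sigma grows beyond 1000 the loop switches to theta = 1, i.e. fixed steps.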
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOOOO NO TEXTURE OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void update_primal_notex(float* device_u, float* device_u_,
float* device_f, float2* device_p,
float tau, float theta, float lambda,
int width, int height, int xstride, int xstride2)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// global memory reads (this variant uses no texture fetches)
float f = device_f[c];
float u = device_u[c];
float u_ = u;
float2 cval = device_p[y*xstride2 + x];
float2 wval = device_p[y*xstride2 + max(0,x-1)];
float2 nval = device_p[max(0,y-1)*xstride2 + x];
if (x == 0)
wval.x = 0.0f;
else if (x >= width-1)
cval.x = 0.0f;
if (y == 0)
nval.y = 0.0f;
else if (y >= height-1)
cval.y = 0.0f;
float divergence = cval.x - wval.x + cval.y - nval.y;
// update primal variable
u = (u + tau*(divergence + lambda*f))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual_notex(float2* device_p, float* device_u_, float sigma,
int width, int height, int xstride, int xstride2)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c2 = y*xstride2 + x;
if(x<width && y<height)
{
float2 p = device_p[c2];
float2 grad_u = make_float2(0.0f, 0.0f);
float cval = device_u_[y*xstride + x];
grad_u.x = device_u_[y*xstride + min(width-1,x+1)] - cval;
grad_u.y = device_u_[min(height-1,y+1)*xstride + x] - cval;
// update dual variable
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
device_p[c2] = p;
}
}
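// Boundary handling in this no-texture variant: clamping the neighbour index
// with min(width-1, x+1) / min(height-1, y+1) here, and zeroing the dual
// components on the image border in update_primal_notex, reproduces the
// Neumann boundary conditions that the texture variants get from
// hipAddressModeClamp.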
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_notex(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
hipFuncSetCacheConfig("_Z17update_dual_notexP6float2Pffiiii", hipFuncCachePreferShared);
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
hipFuncSetCacheConfig("_Z19update_primal_notexPfS_S_P6float2fffiiii", hipFuncCachePreferShared);
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
for (int k = 0; k < max_iter; ++k)
{
hipLaunchKernelGGL(( update_dual_notex), dim3(dimGrid), dim3(dimBlock), 0, 0, device_p->data(), device_u_->data(),
sigma, width, height,
device_u_->stride(), device_p->stride());
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
hipLaunchKernelGGL(( update_primal_notex), dim3(dimGrid), dim3(dimBlock), 0, 0, device_u->data(), device_u_->data(),
device_f->data(), device_p->data(),
tau, theta, lambda, width, height,
device_u->stride(), device_p->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOO SPARSE MATRIX OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void update_primal_sparse(float* device_u, float* device_u_,
float tau, float theta, float lambda,
int width, int height, int xstride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// texture fetches
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
float u_ = u;
// update primal variable
u = (u + tau*(-tex2D(rof_tex_divergence, x+0.5f, y+0.5f) + lambda*tex2D(rof_tex_f, x+0.5f, y+0.5f)))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual_sparse(float2* device_p, float sigma,
int width, int height, int stride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width && y<height)
{
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f) + sigma*tex2D(rof_tex_gradient, x+0.5f, y+0.5f);
device_p[y*stride + x] = p/max(1.0f, length(p));
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_sparse(iu::SparseMatrixGpu_32f* G,
iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
iu::ImageGpu_32f_C2 gradient(width, height);
iu::ImageGpu_32f_C1 divergence(width, height);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
// Gradient
iu::sparseMultiplication(G->handle(), G, device_u_, &gradient);
// Dual update
bindTexture(rof_tex_p, device_p);
bindTexture(rof_tex_gradient, &gradient);
hipLaunchKernelGGL(( update_dual_sparse), dim3(dimGrid), dim3(dimBlock) , 0, 0, device_p->data(), sigma,
width, height, device_p->stride());
hipUnbindTexture(&rof_tex_p);
hipUnbindTexture(&rof_tex_gradient);
// Update Timesteps
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
// Divergence
iu::sparseMultiplication(G->handle(), G, device_p, &divergence, HIPSPARSE_OPERATION_TRANSPOSE);
// Primal update
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_divergence, &divergence);
hipLaunchKernelGGL(( update_primal_sparse), dim3(dimGrid), dim3(dimBlock) , 0, 0, device_u->data(), device_u_->data(),
tau, theta, lambda, width, height,
device_u->stride());
hipUnbindTexture(&rof_tex_f);
hipUnbindTexture(&rof_tex_u);
hipUnbindTexture(&rof_tex_divergence);
// Update Timesteps
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
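// In this variant the forward-difference gradient is applied as the sparse
// matrix G and its adjoint as G^T (HIPSPARSE_OPERATION_TRANSPOSE); since the
// adjoint of the gradient is minus the divergence, update_primal_sparse
// subtracts the G^T*p image where the dense variants add the hand-coded
// divergence.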
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOO SHARED MEMORY OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void update_primal_shared(float* device_u, float* device_u_,
float tau, float theta, float lambda,
int width, int height, int xstride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// texture fetches
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
float u_ = u;
__shared__ float2 p[BSX+1][BSY+1];
p[threadIdx.x+1][threadIdx.y+1] = tex2D(rof_tex_p, x+0.5f, y+0.5f);
if (threadIdx.x==0)
{
p[threadIdx.x][threadIdx.y+1] = tex2D(rof_tex_p, x-0.5f, y+0.5f);
if (x==0)
p[threadIdx.x][threadIdx.y+1].x = 0.0f;
}
if (threadIdx.y==0)
{
p[threadIdx.x+1][threadIdx.y] = tex2D(rof_tex_p, x+0.5f, y-0.5f);
if (y==0)
p[threadIdx.x+1][threadIdx.y].y = 0.0f;
}
if (x >= width-1)
p[threadIdx.x+1][threadIdx.y+1].x = 0.0f;
if (y >= height-1)
p[threadIdx.x+1][threadIdx.y+1].y = 0.0f;
// syncthreads();
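// Note: with BSX == 32 each threadIdx.y row is a separate warp, so the
// p[threadIdx.x+1][threadIdx.y] value read below was written by a thread in a
// different warp; without an explicit __syncthreads() (left commented out
// above) that write is not guaranteed to be visible here.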
float divergence = p[threadIdx.x+1][threadIdx.y+1].x - p[threadIdx.x][threadIdx.y+1].x +
p[threadIdx.x+1][threadIdx.y+1].y - p[threadIdx.x+1][threadIdx.y].y;
// update primal variable
u = (u + tau*(divergence + lambda*f))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual_shared(float2* device_p, float sigma,
int width, int height, int stride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width && y<height)
{
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f);
__shared__ float u_[BSX+1][BSY+1];
u_[threadIdx.x][threadIdx.y] = tex2D(rof_tex_u_, x+0.5f, y+0.5f);
if (threadIdx.x==BSX-1)
u_[threadIdx.x+1][threadIdx.y] = tex2D(rof_tex_u_, x+1.5f, y+0.5f);
if (threadIdx.y==BSY-1)
u_[threadIdx.x][threadIdx.y+1] = tex2D(rof_tex_u_, x+0.5f, y+1.5f);
// syncthreads();
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
// update dual variable
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
device_p[y*stride + x] = p;
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_shared(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_u_, device_u_);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
hipLaunchKernelGGL(( update_dual_shared), dim3(dimGrid), dim3(dimBlock), 0, 0, device_p->data(), sigma,
width, height, device_p->stride());
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
hipLaunchKernelGGL(( update_primal_shared), dim3(dimGrid), dim3(dimBlock), 0, 0, device_u->data(), device_u_->data(),
tau, theta, lambda, width, height,
device_u->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOO SINGLE SHARED MEMORY OOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
const unsigned int BSX_SINGLE=16;
const unsigned int BSY_SINGLE=12;
////////////////////////////////////////////////////////////////////////////////
__global__ void update_shared_single(float* device_u, float2* device_p,
float tau, float theta, float sigma, float lambda,
int width, int height, int stride1, int stride2)
{
int x = blockIdx.x*(blockDim.x-1) + threadIdx.x;
int y = blockIdx.y*(blockDim.y-1) + threadIdx.y;
__shared__ float u_[BSX_SINGLE][BSY_SINGLE];
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f);
float2 pwval = tex2D(rof_tex_p, x-0.5f, y+0.5f);
float2 pnval = tex2D(rof_tex_p, x+0.5f, y-0.5f);
if (x == 0)
pwval.x = 0.0f;
else if (x >= width-1)
p.x = 0.0f;
if (y == 0)
pnval.y = 0.0f;
else if (y >= height-1)
p.y = 0.0f;
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
// Remember old u
u_[threadIdx.x][threadIdx.y] = u;
// Primal update
u = (u + tau*((p.x - pwval.x + p.y - pnval.y) + lambda*f))/(1.0f+tau*lambda);
// Overrelaxation
u_[threadIdx.x][threadIdx.y] = u + theta*(u-u_[threadIdx.x][threadIdx.y]);
__syncthreads();
if(x<width && y<height)
{
if ( (threadIdx.x < BSX_SINGLE-1) && (threadIdx.y < BSY_SINGLE-1) )
{
// Dual Update
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
// Write Back
device_p[y*stride2 + x] = p;
device_u[y*stride1 + x] = u;
}
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_shared_single(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/(BSX_SINGLE-1);
int nb_y = height/(BSY_SINGLE-1);
if (nb_x*(BSX_SINGLE-1) < width) nb_x++;
if (nb_y*(BSY_SINGLE-1) < height) nb_y++;
dim3 dimBlock(BSX_SINGLE,BSY_SINGLE);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
hipLaunchKernelGGL(( update_shared_single), dim3(dimGrid), dim3(dimBlock), 0, 0, device_u->data(), device_p->data(),
tau, theta, sigma, lambda, width, height,
device_u->stride(), device_p->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOO TWO SINGLE SHARED MEMORY OOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
const unsigned int BSX_SINGLE2=16;
const unsigned int BSY_SINGLE2=10;
////////////////////////////////////////////////////////////////////////////////
__global__ void update_shared_single2(float* device_u, float2* device_p,
float tau, float theta, float sigma, float lambda,
int width, int height, int stride1, int stride2)
{
int x = blockIdx.x*(blockDim.x-3) + threadIdx.x - 1;
int y = blockIdx.y*(blockDim.y-3) + threadIdx.y - 1;
__shared__ float u_[BSX_SINGLE2][BSY_SINGLE2];
__shared__ float2 p[BSX_SINGLE2][BSY_SINGLE2];
float2 pc = tex2D(rof_tex_p, x+0.5f, y+0.5f);
float2 pwval = tex2D(rof_tex_p, x-0.5f, y+0.5f);
float2 pnval = tex2D(rof_tex_p, x+0.5f, y-0.5f);
if (x == 0)
pwval.x = 0.0f;
else if (x >= width-1)
pc.x = 0.0f;
if (y == 0)
pnval.y = 0.0f;
else if (y >= height-1)
pc.y = 0.0f;
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
// Remember old u
u_[threadIdx.x][threadIdx.y] = u;
// Primal update
u = (u + tau*((pc.x - pwval.x + pc.y - pnval.y) + lambda*f))/(1.0f+tau*lambda);
// Overrelaxation
u_[threadIdx.x][threadIdx.y] = u + theta*(u-u_[threadIdx.x][threadIdx.y]);
__syncthreads();
if ( (threadIdx.x < BSX_SINGLE2-1) && (threadIdx.y < BSY_SINGLE2-1) )
{
// Dual Update
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
pc = pc + sigma*grad_u;
p[threadIdx.x][threadIdx.y] = pc/max(1.0f, length(pc));
}
__syncthreads();
if (threadIdx.x>0 && threadIdx.y>0)
{
// Remember old u
u_[threadIdx.x][threadIdx.y] = u;
// Primal update
float div = p[threadIdx.x][threadIdx.y].x - p[threadIdx.x-1][threadIdx.y].x +
p[threadIdx.x][threadIdx.y].y - p[threadIdx.x][threadIdx.y-1].y;
u = (u + tau*(div + lambda*f))/(1.0f+tau*lambda);
// Overrelaxation
u_[threadIdx.x][threadIdx.y] = u + theta*(u-u_[threadIdx.x][threadIdx.y]);
}
__syncthreads();
if(x<width && y<height)
{
if ( (threadIdx.x < BSX_SINGLE2-2) && (threadIdx.y < BSY_SINGLE2-2) )
{
// Dual Update
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
pc = p[threadIdx.x][threadIdx.y] + sigma*grad_u;
p[threadIdx.x][threadIdx.y] = pc/max(1.0f, length(pc));
if (threadIdx.x>0 && threadIdx.y>0)
{
// Write Back
device_p[y*stride2 + x] = p[threadIdx.x][threadIdx.y];
device_u[y*stride1 + x] = u;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_shared_single2(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/(BSX_SINGLE2-3);
int nb_y = height/(BSY_SINGLE2-3);
if (nb_x*(BSX_SINGLE2-3) < width) nb_x++;
if (nb_y*(BSY_SINGLE2-3) < height) nb_y++;
dim3 dimBlock(BSX_SINGLE2,BSY_SINGLE2);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; k+=2)
{
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
hipLaunchKernelGGL(( update_shared_single2), dim3(dimGrid), dim3(dimBlock), 0, 0, device_u->data(), device_p->data(),
tau, theta, sigma, lambda, width, height,
device_u->stride(), device_p->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
#endif // IUSPARSECOMPARE_CU
| ff0e56878bfce0767cc2442ab473348992beeaba.cu | #ifndef IUSPARSECOMPARE_CU
#define IUSPARSECOMPARE_CU
#include <list>
#include <iostream>
#include <iudefs.h>
#include <iucore.h>
#include <iucutil.h>
#include <iumath.h>
#include <iusparse.h>
// textures
texture<float, 2, cudaReadModeElementType> rof_tex_u;
texture<float, 2, cudaReadModeElementType> rof_tex_u_;
texture<float, 2, cudaReadModeElementType> rof_tex_f;
texture<float2, 2, cudaReadModeElementType> rof_tex_p;
texture<float2, 2, cudaReadModeElementType> rof_tex_gradient;
texture<float, 2, cudaReadModeElementType> rof_tex_divergence;
const unsigned int BSX=32;
const unsigned int BSY=16;
////////////////////////////////////////////////////////////////////////////////
void bindTexture(texture<float, 2>& tex, iu::ImageGpu_32f_C1* mem)
{
tex.addressMode[0] = cudaAddressModeClamp; // Neumann Boundary Conditions
tex.addressMode[1] = cudaAddressModeClamp; // Neumann Boundary Conditions
tex.filterMode = cudaFilterModeLinear;
tex.normalized = false;
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
( cudaBindTexture2D( 0, &tex, mem->data(), &channel_desc,
mem->width(), mem->height(), mem->pitch()));
}
////////////////////////////////////////////////////////////////////////////////
void bindTexture(texture<float2, 2>& tex, iu::ImageGpu_32f_C2* mem)
{
tex.addressMode[0] = cudaAddressModeClamp; // Neumann Boundary Conditions
tex.addressMode[1] = cudaAddressModeClamp; // Neumann Boundary Conditions
tex.filterMode = cudaFilterModeLinear;
tex.normalized = false;
cudaChannelFormatDesc channel_desc_float2 = cudaCreateChannelDesc<float2>();
( cudaBindTexture2D( 0, &tex, mem->data(), &channel_desc_float2,
mem->width(), mem->height(), mem->pitch()));
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOO STANDARD MODEL OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void update_primal(float* device_u, float* device_u_,
float tau, float theta, float lambda,
int width, int height, int xstride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// texture fetches
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
float u_ = u;
float divergence = dp_ad(rof_tex_p, x, y, width, height);
// update primal variable
u = (u + tau*(divergence + lambda*f))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual(float2* device_p, float sigma,
int width, int height, int stride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width && y<height)
{
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f);
float2 grad_u = dp(rof_tex_u_, x, y);
// update dual variable
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
device_p[y*stride + x] = p;
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_u_, device_u_);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
update_dual<<<dimGrid, dimBlock>>>(device_p->data(), sigma,
width, height, device_p->stride());
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
update_primal<<<dimGrid, dimBlock>>>(device_u->data(), device_u_->data(),
tau, theta, lambda, width, height,
device_u->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOOOO NO TEXTURE OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
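// Same primal/dual iteration as above, but without textures: f, u and p are read
// directly from global memory and the backward-difference divergence (with Neumann
// boundary handling) is assembled inline from the west and north neighbours of p.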
__global__ void update_primal_notex(float* device_u, float* device_u_,
float* device_f, float2* device_p,
float tau, float theta, float lambda,
int width, int height, int xstride, int xstride2)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// texture fetches
float f = device_f[c];
float u = device_u[c];
float u_ = u;
float2 cval = device_p[y*xstride2 + x];
float2 wval = device_p[y*xstride2 + max(0,x-1)];
float2 nval = device_p[max(0,y-1)*xstride2 + x];
if (x == 0)
wval.x = 0.0f;
else if (x >= width-1)
cval.x = 0.0f;
if (y == 0)
nval.y = 0.0f;
else if (y >= height-1)
cval.y = 0.0f;
float divergence = cval.x - wval.x + cval.y - nval.y;
// update primal variable
u = (u + tau*(divergence + lambda*f))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual_notex(float2* device_p, float* device_u_, float sigma,
int width, int height, int xstride, int xstride2)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c2 = y*xstride2 + x;
if(x<width && y<height)
{
float2 p = device_p[c2];
float2 grad_u = make_float2(0.0f, 0.0f);
float cval = device_u_[y*xstride + x];
grad_u.x = device_u_[y*xstride + min(width-1,x+1)] - cval;
grad_u.y = device_u_[min(height-1,y+1)*xstride + x] - cval;
// update dual variable
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
device_p[c2] = p;
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_notex(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
cudaFuncSetCacheConfig("_Z17update_dual_notexP6float2Pffiiii", cudaFuncCachePreferShared);
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
cudaFuncSetCacheConfig("_Z19update_primal_notexPfS_S_P6float2fffiiii", cudaFuncCachePreferShared);
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
for (int k = 0; k < max_iter; ++k)
{
update_dual_notex<<<dimGrid, dimBlock>>>(device_p->data(), device_u_->data(),
sigma, width, height,
device_u_->stride(), device_p->stride());
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
update_primal_notex<<<dimGrid, dimBlock>>>(device_u->data(), device_u_->data(),
device_f->data(), device_p->data(),
tau, theta, lambda, width, height,
device_u->stride(), device_p->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOO SPARSE MATRIX OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
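// Sparse-matrix variant: the gradient of u_ and the divergence term are precomputed in
// the host loop as sparse matrix-vector products (G*u_ and G^T*p via cuSPARSE) and are
// passed to the kernels through the rof_tex_gradient / rof_tex_divergence textures.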
__global__ void update_primal_sparse(float* device_u, float* device_u_,
float tau, float theta, float lambda,
int width, int height, int xstride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// texture fetches
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
float u_ = u;
// update primal variable
u = (u + tau*(-tex2D(rof_tex_divergence, x+0.5f, y+0.5f) + lambda*tex2D(rof_tex_f, x+0.5f, y+0.5f)))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual_sparse(float2* device_p, float sigma,
int width, int height, int stride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width && y<height)
{
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f) + sigma*tex2D(rof_tex_gradient, x+0.5f, y+0.5f);
device_p[y*stride + x] = p/max(1.0f, length(p));
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_sparse(iu::SparseMatrixGpu_32f* G,
iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
iu::ImageGpu_32f_C2 gradient(width, height);
iu::ImageGpu_32f_C1 divergence(width, height);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
// Gradient
iu::sparseMultiplication(G->handle(), G, device_u_, &gradient);
// Dual update
bindTexture(rof_tex_p, device_p);
bindTexture(rof_tex_gradient, &gradient);
update_dual_sparse<<< dimGrid, dimBlock >>>(device_p->data(), sigma,
width, height, device_p->stride());
cudaUnbindTexture(&rof_tex_p);
cudaUnbindTexture(&rof_tex_gradient);
// Update Timesteps
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
// Divergence
iu::sparseMultiplication(G->handle(), G, device_p, &divergence, CUSPARSE_OPERATION_TRANSPOSE);
// Primal update
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_divergence, &divergence);
update_primal_sparse<<< dimGrid, dimBlock >>>(device_u->data(), device_u_->data(),
tau, theta, lambda, width, height,
device_u->stride());
cudaUnbindTexture(&rof_tex_f);
cudaUnbindTexture(&rof_tex_u);
cudaUnbindTexture(&rof_tex_divergence);
// Update Timesteps
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOOOOO SHARED MEMORY OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
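// Shared-memory variants: the quantity being differenced (p for the primal step, u_ for
// the dual step) is staged in shared memory with a one-element halo before the finite
// differences are taken; f and u are still fetched through textures.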
__global__ void update_primal_shared(float* device_u, float* device_u_,
float tau, float theta, float lambda,
int width, int height, int xstride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*xstride + x;
if(x<width && y<height)
{
// texture fetches
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
float u_ = u;
__shared__ float2 p[BSX+1][BSY+1];
p[threadIdx.x+1][threadIdx.y+1] = tex2D(rof_tex_p, x+0.5f, y+0.5f);
if (threadIdx.x==0)
{
p[threadIdx.x][threadIdx.y+1] = tex2D(rof_tex_p, x-0.5f, y+0.5f);
if (x==0)
p[threadIdx.x][threadIdx.y+1].x = 0.0f;
}
if (threadIdx.y==0)
{
p[threadIdx.x+1][threadIdx.y] = tex2D(rof_tex_p, x+0.5f, y-0.5f);
if (y==0)
p[threadIdx.x+1][threadIdx.y].y = 0.0f;
}
if (x >= width-1)
p[threadIdx.x+1][threadIdx.y+1].x = 0.0f;
if (y >= height-1)
p[threadIdx.x+1][threadIdx.y+1].y = 0.0f;
// syncthreads();
float divergence = p[threadIdx.x+1][threadIdx.y+1].x - p[threadIdx.x][threadIdx.y+1].x +
p[threadIdx.x+1][threadIdx.y+1].y - p[threadIdx.x+1][threadIdx.y].y;
// update primal variable
u = (u + tau*(divergence + lambda*f))/(1.0f+tau*lambda);
device_u[c] = u;
device_u_[c] = u + theta*(u-u_);
}
}
////////////////////////////////////////////////////////////////////////////////
__global__ void update_dual_shared(float2* device_p, float sigma,
int width, int height, int stride)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width && y<height)
{
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f);
__shared__ float u_[BSX+1][BSY+1];
u_[threadIdx.x][threadIdx.y] = tex2D(rof_tex_u_, x+0.5f, y+0.5f);
if (threadIdx.x==BSX-1)
u_[threadIdx.x+1][threadIdx.y] = tex2D(rof_tex_u_, x+1.5f, y+0.5f);
if (threadIdx.y==BSY-1)
u_[threadIdx.x][threadIdx.y+1] = tex2D(rof_tex_u_, x+0.5f, y+1.5f);
// syncthreads();
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
// update dual variable
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
device_p[y*stride + x] = p;
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_shared(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C1* device_u_, iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/BSX;
int nb_y = height/BSY;
if (nb_x*BSX < width) nb_x++;
if (nb_y*BSY < height) nb_y++;
dim3 dimBlock(BSX,BSY);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_u_, device_u_);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
update_dual_shared<<<dimGrid, dimBlock>>>(device_p->data(), sigma,
width, height, device_p->stride());
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
update_primal_shared<<<dimGrid, dimBlock>>>(device_u->data(), device_u_->data(),
tau, theta, lambda, width, height,
device_u->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOOOO SINGLE SHARED MEMORY OOOOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
const unsigned int BSX_SINGLE=16;
const unsigned int BSY_SINGLE=12;
////////////////////////////////////////////////////////////////////////////////
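// Fused variant: one primal and one dual update are performed in a single kernel launch.
// Blocks are launched with a stride of blockDim-1, i.e. they overlap by one pixel, so the
// dual update of the interior threads sees valid over-relaxed neighbours.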
__global__ void update_shared_single(float* device_u, float2* device_p,
float tau, float theta, float sigma, float lambda,
int width, int height, int stride1, int stride2)
{
int x = blockIdx.x*(blockDim.x-1) + threadIdx.x;
int y = blockIdx.y*(blockDim.y-1) + threadIdx.y;
__shared__ float u_[BSX_SINGLE][BSY_SINGLE];
float2 p = tex2D(rof_tex_p, x+0.5f, y+0.5f);
float2 pwval = tex2D(rof_tex_p, x-0.5f, y+0.5f);
float2 pnval = tex2D(rof_tex_p, x+0.5f, y-0.5f);
if (x == 0)
pwval.x = 0.0f;
else if (x >= width-1)
p.x = 0.0f;
if (y == 0)
pnval.y = 0.0f;
else if (y >= height-1)
p.y = 0.0f;
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
// Remember old u
u_[threadIdx.x][threadIdx.y] = u;
// Primal update
u = (u + tau*((p.x - pwval.x + p.y - pnval.y) + lambda*f))/(1.0f+tau*lambda);
// Overrelaxation
u_[threadIdx.x][threadIdx.y] = u + theta*(u-u_[threadIdx.x][threadIdx.y]);
__syncthreads();
if(x<width && y<height)
{
if ( (threadIdx.x < BSX_SINGLE-1) && (threadIdx.y < BSY_SINGLE-1) )
{
// Dual Update
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
p = p + sigma*grad_u;
p = p/max(1.0f, length(p));
// Write Back
device_p[y*stride2 + x] = p;
device_u[y*stride1 + x] = u;
}
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_shared_single(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/(BSX_SINGLE-1);
int nb_y = height/(BSY_SINGLE-1);
if (nb_x*(BSX_SINGLE-1) < width) nb_x++;
if (nb_y*(BSY_SINGLE-1) < height) nb_y++;
dim3 dimBlock(BSX_SINGLE,BSY_SINGLE);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; ++k)
{
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
update_shared_single<<<dimGrid, dimBlock>>>(device_u->data(), device_p->data(),
tau, theta, sigma, lambda, width, height,
device_u->stride(), device_p->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
////////////////////////////////////////////////////////////////////////////////
//############################################################################//
//OOOOOOOOOOOOOOOOOOOOOO TWO SINGLE SHARED MEMORY OOOOOOOOOOOOOOOOOOOOOOOOOOO//
//############################################################################//
////////////////////////////////////////////////////////////////////////////////
const unsigned int BSX_SINGLE2=16;
const unsigned int BSY_SINGLE2=10;
////////////////////////////////////////////////////////////////////////////////
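// Doubly fused variant: two complete primal-dual iterations are performed per kernel
// launch (hence the host loop advances k by 2) and blocks overlap by three pixels so the
// second iteration still has a valid one-pixel neighbourhood to work with.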
__global__ void update_shared_single2(float* device_u, float2* device_p,
float tau, float theta, float sigma, float lambda,
int width, int height, int stride1, int stride2)
{
int x = blockIdx.x*(blockDim.x-3) + threadIdx.x - 1;
int y = blockIdx.y*(blockDim.y-3) + threadIdx.y - 1;
__shared__ float u_[BSX_SINGLE2][BSY_SINGLE2];
__shared__ float2 p[BSX_SINGLE2][BSY_SINGLE2];
float2 pc = tex2D(rof_tex_p, x+0.5f, y+0.5f);
float2 pwval = tex2D(rof_tex_p, x-0.5f, y+0.5f);
float2 pnval = tex2D(rof_tex_p, x+0.5f, y-0.5f);
if (x == 0)
pwval.x = 0.0f;
else if (x >= width-1)
pc.x = 0.0f;
if (y == 0)
pnval.y = 0.0f;
else if (y >= height-1)
pc.y = 0.0f;
float f = tex2D(rof_tex_f, x+0.5f, y+0.5f);
float u = tex2D(rof_tex_u, x+0.5f, y+0.5f);
// Remember old u
u_[threadIdx.x][threadIdx.y] = u;
// Primal update
u = (u + tau*((pc.x - pwval.x + pc.y - pnval.y) + lambda*f))/(1.0f+tau*lambda);
// Overrelaxation
u_[threadIdx.x][threadIdx.y] = u + theta*(u-u_[threadIdx.x][threadIdx.y]);
__syncthreads();
if ( (threadIdx.x < BSX_SINGLE2-1) && (threadIdx.y < BSY_SINGLE2-1) )
{
// Dual Update
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
pc = pc + sigma*grad_u;
p[threadIdx.x][threadIdx.y] = pc/max(1.0f, length(pc));
}
__syncthreads();
if (threadIdx.x>0 && threadIdx.y>0)
{
// Remember old u
u_[threadIdx.x][threadIdx.y] = u;
// Primal update
float div = p[threadIdx.x][threadIdx.y].x - p[threadIdx.x-1][threadIdx.y].x +
p[threadIdx.x][threadIdx.y].y - p[threadIdx.x][threadIdx.y-1].y;
u = (u + tau*(div + lambda*f))/(1.0f+tau*lambda);
// Overrelaxation
u_[threadIdx.x][threadIdx.y] = u + theta*(u-u_[threadIdx.x][threadIdx.y]);
}
__syncthreads();
if(x<width && y<height)
{
if ( (threadIdx.x < BSX_SINGLE2-2) && (threadIdx.y < BSY_SINGLE2-2) )
{
// Dual Update
float2 grad_u = make_float2(u_[threadIdx.x+1][threadIdx.y] - u_[threadIdx.x][threadIdx.y],
u_[threadIdx.x][threadIdx.y+1] - u_[threadIdx.x][threadIdx.y] );
pc = p[threadIdx.x][threadIdx.y] + sigma*grad_u;
p[threadIdx.x][threadIdx.y] = pc/max(1.0f, length(pc));
if (threadIdx.x>0 && threadIdx.y>0)
{
// Write Back
device_p[y*stride2 + x] = p[threadIdx.x][threadIdx.y];
device_u[y*stride1 + x] = u;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void rof_primal_dual_shared_single2(iu::ImageGpu_32f_C1* device_f, iu::ImageGpu_32f_C1* device_u,
iu::ImageGpu_32f_C2* device_p,
float lambda, int max_iter)
{
int width = device_f->width();
int height = device_f->height();
// fragmentation
int nb_x = width/(BSX_SINGLE2-3);
int nb_y = height/(BSY_SINGLE2-3);
if (nb_x*(BSX_SINGLE2-3) < width) nb_x++;
if (nb_y*(BSY_SINGLE2-3) < height) nb_y++;
dim3 dimBlock(BSX_SINGLE2,BSY_SINGLE2);
dim3 dimGrid(nb_x,nb_y);
bindTexture(rof_tex_f, device_f);
bindTexture(rof_tex_u, device_u);
bindTexture(rof_tex_p, device_p);
float L = sqrtf(8.0f);
float tau = 1/L;
float sigma = 1/L;
float theta;
for (int k = 0; k < max_iter; k+=2)
{
if (sigma < 1000.0f)
theta = 1/sqrtf(1.0f+0.7f*lambda*tau);
else
theta = 1.0f;
update_shared_single2<<<dimGrid, dimBlock>>>(device_u->data(), device_p->data(),
tau, theta, sigma, lambda, width, height,
device_u->stride(), device_p->stride());
sigma /= theta;
tau *= theta;
}
iu::checkCudaErrorState(__FILE__, __FUNCTION__, __LINE__);
}
#endif // IUSPARSECOMPARE_CU
|
c306b41743d4cee93ed10acf8d942280b593e702.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "raytrace.h"
#include <cassert>
#include <iostream>
#include "helper_cuda.h"
#include "helper_math.h"
#include "geometry.cuh"
#include "geometry.cu"
__device__ float siddonRPL(
float3 source,
float3 dest,
float3 start,
uint3 size,
float3 spacing,
hipTextureObject_t texDens,
float stop_early=-1.f
) {
// avoid division by zero (fixes blank center pixel issue)
dest += 1e-12f;
// projection vector for this thread
float3 diff = dest - source;
////////////////////////////////////////////////////////
/* Siddon's algorithm normalizes the distance between points 1 and 2
* alpha is the current position along that normalized vector as a scalar amount
* alpha_min and alpha_max set the bounds of intersection between this vector and the volume of interest
* limits of alpha parameter - aligned to plane positions not voxel center
*/
float a_min, a_max;
float3 alpha_min, alpha_max;
{
float3 end = start + spacing*make_float3(size)-1.f;
float3 a_first, a_last;
a_first.x = (start.x - source.x - 0.5f*spacing.x) / diff.x;
a_first.y = (start.y - source.y - 0.5f*spacing.y) / diff.y;
a_first.z = (start.z - source.z - 0.5f*spacing.z) / diff.z;
a_last.x = (end.x - source.x + 0.5f*spacing.x) / diff.x;
a_last.y = (end.y - source.y + 0.5f*spacing.y) / diff.y;
a_last.z = (end.z - source.z + 0.5f*spacing.z) / diff.z;
alpha_min.x = min(a_first.x, a_last.x);
alpha_min.y = min(a_first.y, a_last.y);
alpha_min.z = min(a_first.z, a_last.z);
alpha_max.x = max(a_first.x, a_last.x);
alpha_max.y = max(a_first.y, a_last.y);
alpha_max.z = max(a_first.z, a_last.z);
a_min = fmaxf(0,fmaxf(fmaxf(alpha_min.x, alpha_min.y), alpha_min.z));
a_max = fminf(1,fminf(fminf(alpha_max.x, alpha_max.y), alpha_max.z));
}
float rpl = 0.f;
if (!(a_min >= a_max)) {
float d12 = length(diff); // distance between ray end points
float step_x = fabsf(spacing.x/diff.x);
float step_y = fabsf(spacing.y/diff.y);
float step_z = fabsf(spacing.z/diff.z);
// step along the vector, sampling each time we cross any plane (meaning we've entered a new voxel)
float alpha = a_min;
float alpha_x = a_min;
float alpha_y = a_min;
float alpha_z = a_min;
float nextalpha;
float alpha_mid;
const int max_iters = 5000;
const float intersect_min = 0.001f*fmin(fmin(spacing.x, spacing.y), spacing.z);
int iter = 0;
while (alpha < a_max && ++iter < max_iters) {
// find next intersection plane
bool valid_x, valid_y, valid_z;
if (alpha_x >= 0.0f && alpha_x < 1.0f) { valid_x = true; } else { valid_x = false; };
if (alpha_y >= 0.0f && alpha_y < 1.0f) { valid_y = true; } else { valid_y = false; };
if (alpha_z >= 0.0f && alpha_z < 1.0f) { valid_z = true; } else { valid_z = false; };
if (!(valid_x || valid_y || valid_z)) { break; }
if (valid_x && (!valid_y || alpha_x <= alpha_y) && (!valid_z || alpha_x <= alpha_z)) {
nextalpha = alpha_x;
alpha_x += step_x;
}
else if (valid_y && (!valid_x || alpha_y <= alpha_x) && (!valid_z || alpha_y <= alpha_z)) {
nextalpha = alpha_y;
alpha_y += step_y;
}
else if (valid_z && (!valid_x || alpha_z <= alpha_x) && (!valid_y || alpha_z <= alpha_y)) {
nextalpha = alpha_z;
alpha_z += step_z;
}
// the total intersection length of previous voxel
float intersection = fabsf(d12*(nextalpha-alpha)); // intersection is voxel intersection length
if (intersection>=intersect_min) { // do not process unless > 0.1 mm
alpha_mid = (nextalpha + alpha)*0.5f; // midpoint between intersections
// Remember that this function traces only a single ray.
// rpl has been set to zero during initialisation.
float fetchX = (source.x + alpha_mid*diff.x - start.x) / spacing.x;
float fetchY = (source.y + alpha_mid*diff.y - start.y) / spacing.y;
float fetchZ = (source.z + alpha_mid*diff.z - start.z) / spacing.z;
rpl += intersection * tex3D<float>( texDens, fetchX, fetchY, fetchZ);
if (stop_early >= 0 && rpl > stop_early) { break; }
}
alpha = nextalpha;
}
assert(iter<max_iters); // something is wrong
}
if (!isfinite(rpl)) { rpl = 0.f; }
return rpl;
}
// Ray-trace from source along beamlet central axis, returning path-length
__global__ void cudaRayTrace(
float* rpl, // output of radiologic path lengths for each src/dest pair
float* sources, // array of src coords
float* dests, // array of dest coords
uint npts, // number of dests/rpls
float3 densStart, // coords of volume start
uint3 densSize, // array size
float3 densSpacing, // voxelsize of volume
hipTextureObject_t texDens, // volume texture with tri-linear interpolation
float stop_early=-1.f
) {
uint tid = threadIdx.y + blockIdx.x * blockDim.y;
if (tid < npts) {
// for each voxel, the ray begins at the beam source (point source assumption)
// the ray ends at the center of current voxel
float3 source = {sources[3*tid], sources[3*tid + 1], sources[3*tid + 2]};
float3 dest = {dests[3*tid], dests[3*tid + 1], dests[3*tid + 2]};
// radiological path length
rpl[tid] = siddonRPL(
source,
dest,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
}
}
// Ray-trace from common source through center/corners/mid-edges of "detector" element positioned on the detector plane which is centered at isocenter and oriented at some angle
#define subrayidx (threadIdx.x)
#define subraycount (blockDim.x)
#define s_index(ii) threadIdx.y+blockDim.y*(threadIdx.z+blockDim.z*ii)
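// Each detector element is sampled with 9 sub-rays (centre, four corners and four edge
// mid-points, selected by threadIdx.x); the per-sub-ray path lengths are collected in
// shared memory and averaged into the output pixel by the first sub-ray thread.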
__global__ void cudaBeamTrace(
float* rpl,
float sad,
uint2 detDims,
float3 detCenter,
float2 detSpacing,
float2 detPixelSize,
float detAzi, // gantry angle
float detZen, // couch angle
float detAng, // collimator angle
float3 densStart,
uint3 densSize,
float3 densSpacing,
hipTextureObject_t texDens,
float stop_early=-1.f
) {
// dynamically allocated shared memory
extern __shared__ float s_subrpl[];
s_subrpl[s_index(subrayidx)] = 0;
__syncthreads();
int ray_X = threadIdx.y + blockIdx.y*blockDim.y;
int ray_Z = threadIdx.z + blockIdx.z*blockDim.z;
// threads are launched for each pixel in the 2D output map
// ray_X/ray_Z in Fluence Coord Sys (FCS) which matches RCS before rotation
if (ray_X >= detDims.x || ray_Z >= detDims.y) { return; }
// shift for sub-ray [-1, 0, +1]
int subray_X = (subrayidx % 3) - 1;
int subray_Z = (subrayidx / 3) - 1;
// the end point of each ray is found from the 2D coordinates on the fluence map
// center coord of fluence map is detCenter
// bixel coords defined in ray_X-ray_Z plane (FCS) then rotated with beam angles into RCS
// don't let x/y in int2 scare you, it is really x/z
// we directly define bixel coords in DCS to be consistent with storage of texRay
float2 detsize = make_float2(detDims-1)*detSpacing;
float3 bixel_ctr_FCS = make_float3(
-0.5f*detsize.x + ray_X*detSpacing.x + 0.5f*subray_X*detPixelSize.x,
0,
-0.5f*detsize.y + ray_Z*detSpacing.y + 0.5f*subray_Z*detPixelSize.y
);
float3 bixel_ctr = inverseRotateBeamAtOriginRHS(bixel_ctr_FCS, detAzi, detZen, detAng);
bixel_ctr += detCenter;
float3 source = inverseRotateBeamAtOriginRHS(make_float3(0.f, -sad, 0.f), detAzi, detZen, detAng) + detCenter;
// extend end of raytrace beyond fluence map plane
float3 shortdiff = bixel_ctr - source;
float3 sink = source + 10.f*shortdiff;
s_subrpl[s_index(subrayidx)] = siddonRPL(
source,
sink,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
__syncthreads();
if (subrayidx == 0) {
float f = 0.f;
for (int ii=0; ii<subraycount; ii++) {
f += s_subrpl[s_index(ii)];
}
// write out the fluence map
rpl[ray_X + detDims.x * ray_Z] = f/9.f;
}
}
// C-CALLABLE ENTRY POINT
void raytrace_c(
float* rpl,
const float* sources,
const float* dests,
unsigned int npts,
float* dens,
float3 densStart,
uint3 densSize,
float3 densSpacing,
float stop_early
) {
// allocate hipArray opaque memory
hipArray* d_densArr;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipExtent extent = make_hipExtent(densSize.x, densSize.y, densSize.z);
checkCudaErrors( hipMalloc3DArray(&d_densArr, &desc, extent) );
// copy to hipArray
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(dens, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_densArr;
copyParams.kind = hipMemcpyHostToDevice;
copyParams.extent = extent;
checkCudaErrors( hipMemcpy3D(©Params) );
// create texture object
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(hipResourceDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = d_densArr;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(hipTextureDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = hipFilterModeLinear;
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.addressMode[2] = hipAddressModeBorder;
texDesc.readMode = hipReadModeElementType;
hipTextureObject_t texDens;
checkCudaErrors( hipCreateTextureObject(&texDens, &resDesc, &texDesc, NULL) );
// allocate device memory
float* d_rpl;
checkCudaErrors( hipMalloc(&d_rpl, npts*sizeof(float)) );
float* d_sources;
checkCudaErrors( hipMalloc(&d_sources, npts*sizeof(float3)) );
checkCudaErrors( hipMemcpy(d_sources, sources, npts*3*sizeof(float), hipMemcpyHostToDevice) );
float* d_dests;
checkCudaErrors( hipMalloc(&d_dests, npts*3*sizeof(float)) );
checkCudaErrors( hipMemcpy(d_dests, dests, npts*3*sizeof(float), hipMemcpyHostToDevice) );
// setup kernel call params
dim3 rayBlock;
uint threadLimit = 256;
rayBlock = dim3(1, threadLimit, 1);
dim3 rayGrid = dim3((uint)ceilf((float)npts/rayBlock.y), 1, 1);
int sharedMem = rayBlock.x*rayBlock.y*rayBlock.z*sizeof(float);
// std::cout << "rayBlock: ("<<rayBlock.x<<","<<rayBlock.y<<","<<rayBlock.z<<")"<<std::endl;
// std::cout << "rayGrid: ("<<rayGrid.x<<","<<rayGrid.y<<","<<rayGrid.z<<")"<<std::endl;
// std::cout << "raySharedMem: "<<sharedMem << " bytes"<< std::endl;
// call raytracing kernel
hipLaunchKernelGGL(( cudaRayTrace) , dim3(rayGrid), dim3(rayBlock), sharedMem , 0,
d_rpl,
d_sources,
d_dests,
npts,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
hipDeviceSynchronize();
getLastCudaError("Error during cudaRayTrace");
checkCudaErrors( hipMemcpy(rpl, d_rpl, npts*sizeof(float), hipMemcpyDeviceToHost) );
// free memory
checkCudaErrors( hipFree(d_rpl) );
checkCudaErrors( hipFree(d_dests) );
checkCudaErrors( hipDestroyTextureObject(texDens) );
checkCudaErrors( hipFreeArray(d_densArr) );
}
void beamtrace_c(
float* rpl,
const float sad,
const uint2 detDims,
const float3 detCenter,
const float2 detSpacing,
const float2 detPixelSize,
const float detAzi,
const float detZen,
const float detAng,
const float* dens,
const float3 densStart,
const uint3 densSize,
const float3 densSpacing,
const float stop_early
) {
// allocate hipArray opaque memory
hipArray* d_densArr;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipExtent extent = make_hipExtent(densSize.x, densSize.y, densSize.z);
checkCudaErrors( hipMalloc3DArray(&d_densArr, &desc, extent) );
// copy to hipArray
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr((void*)dens, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_densArr;
copyParams.kind = hipMemcpyHostToDevice;
copyParams.extent = extent;
checkCudaErrors( hipMemcpy3D(©Params) );
// create texture object
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(hipResourceDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = d_densArr;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(hipTextureDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = hipFilterModeLinear;
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.addressMode[2] = hipAddressModeBorder;
texDesc.readMode = hipReadModeElementType;
hipTextureObject_t texDens;
checkCudaErrors( hipCreateTextureObject(&texDens, &resDesc, &texDesc, NULL) );
//////////////////////////////////////////////////////////////////////////////////////////////
// projection through iso_cntr_matrix to create conformal field map from source
float *d_rpl;
int nbixels = detDims.x * detDims.y;
checkCudaErrors( hipMalloc( &d_rpl, nbixels*sizeof(float) ) );
checkCudaErrors( hipMemset( d_rpl, 0, nbixels*sizeof(float) ) );
// use center/corner/edge sampling pattern
dim3 rayGrid;
dim3 rayBlock = dim3{9, 8, 8};
// create a thread for each pixel in a 2D square fluence map array
rayGrid.y = ceilf((float)detDims.x/rayBlock.y);
rayGrid.z = ceilf((float)detDims.y/rayBlock.z);
// call raytracing kernel
size_t sharedMem = rayBlock.x*rayBlock.y*rayBlock.z*sizeof(float);
hipLaunchKernelGGL(( cudaBeamTrace) , dim3(rayGrid), dim3(rayBlock), sharedMem , 0,
d_rpl,
sad,
detDims,
detCenter,
detSpacing,
detPixelSize,
detAzi,
detZen,
detAng,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
hipDeviceSynchronize();
getLastCudaError("Error during cudaBeamTrace");
checkCudaErrors( hipMemcpy(rpl, d_rpl, nbixels*sizeof(float), hipMemcpyDeviceToHost) );
// free memory
checkCudaErrors( hipDestroyTextureObject(texDens) );
checkCudaErrors( hipFreeArray(d_densArr) );
checkCudaErrors( hipFree(d_rpl) );
}
| c306b41743d4cee93ed10acf8d942280b593e702.cu | #include "raytrace.h"
#include <cassert>
#include <iostream>
#include "helper_cuda.h"
#include "helper_math.h"
#include "geometry.cuh"
#include "geometry.cu"
__device__ float siddonRPL(
float3 source,
float3 dest,
float3 start,
uint3 size,
float3 spacing,
cudaTextureObject_t texDens,
float stop_early=-1.f
) {
// avoid division by zero (fixes blank center pixel issue)
dest += 1e-12f;
// projection vector for this thread
float3 diff = dest - source;
////////////////////////////////////////////////////////
/* Siddon's algorithm normalizes the distance between points 1 and 2
* alpha is the current position along that normalized vector as a scalar amount
* alpha_min and alpha_max set the bounds of intersection between this vector and the volume of interest
* limits of alpha parameter - aligned to plane positions not voxel center
*/
float a_min, a_max;
float3 alpha_min, alpha_max;
{
float3 end = start + spacing*make_float3(size)-1.f;
float3 a_first, a_last;
a_first.x = (start.x - source.x - 0.5f*spacing.x) / diff.x;
a_first.y = (start.y - source.y - 0.5f*spacing.y) / diff.y;
a_first.z = (start.z - source.z - 0.5f*spacing.z) / diff.z;
a_last.x = (end.x - source.x + 0.5f*spacing.x) / diff.x;
a_last.y = (end.y - source.y + 0.5f*spacing.y) / diff.y;
a_last.z = (end.z - source.z + 0.5f*spacing.z) / diff.z;
alpha_min.x = min(a_first.x, a_last.x);
alpha_min.y = min(a_first.y, a_last.y);
alpha_min.z = min(a_first.z, a_last.z);
alpha_max.x = max(a_first.x, a_last.x);
alpha_max.y = max(a_first.y, a_last.y);
alpha_max.z = max(a_first.z, a_last.z);
a_min = fmaxf(0,fmaxf(fmaxf(alpha_min.x, alpha_min.y), alpha_min.z));
a_max = fminf(1,fminf(fminf(alpha_max.x, alpha_max.y), alpha_max.z));
}
float rpl = 0.f;
if (!(a_min >= a_max)) {
float d12 = length(diff); // distance between ray end points
float step_x = fabsf(spacing.x/diff.x);
float step_y = fabsf(spacing.y/diff.y);
float step_z = fabsf(spacing.z/diff.z);
// step along the vector, sampling each time we cross any plane (meaning we've entered a new voxel)
float alpha = a_min;
float alpha_x = a_min;
float alpha_y = a_min;
float alpha_z = a_min;
float nextalpha;
float alpha_mid;
const int max_iters = 5000;
const float intersect_min = 0.001f*fmin(fmin(spacing.x, spacing.y), spacing.z);
int iter = 0;
while (alpha < a_max && ++iter < max_iters) {
// find next intersection plane
bool valid_x, valid_y, valid_z;
if (alpha_x >= 0.0f && alpha_x < 1.0f) { valid_x = true; } else { valid_x = false; };
if (alpha_y >= 0.0f && alpha_y < 1.0f) { valid_y = true; } else { valid_y = false; };
if (alpha_z >= 0.0f && alpha_z < 1.0f) { valid_z = true; } else { valid_z = false; };
if (!(valid_x || valid_y || valid_z)) { break; }
if (valid_x && (!valid_y || alpha_x <= alpha_y) && (!valid_z || alpha_x <= alpha_z)) {
nextalpha = alpha_x;
alpha_x += step_x;
}
else if (valid_y && (!valid_x || alpha_y <= alpha_x) && (!valid_z || alpha_y <= alpha_z)) {
nextalpha = alpha_y;
alpha_y += step_y;
}
else if (valid_z && (!valid_x || alpha_z <= alpha_x) && (!valid_y || alpha_z <= alpha_y)) {
nextalpha = alpha_z;
alpha_z += step_z;
}
// the total intersection length of previous voxel
float intersection = fabsf(d12*(nextalpha-alpha)); // intersection is voxel intersection length
if (intersection>=intersect_min) { // do not process unless > 0.1 mm
alpha_mid = (nextalpha + alpha)*0.5f; // midpoint between intersections
// Remember that this function traces only a single ray.
// rpl has been set to zero during initialisation.
float fetchX = (source.x + alpha_mid*diff.x - start.x) / spacing.x;
float fetchY = (source.y + alpha_mid*diff.y - start.y) / spacing.y;
float fetchZ = (source.z + alpha_mid*diff.z - start.z) / spacing.z;
rpl += intersection * tex3D<float>( texDens, fetchX, fetchY, fetchZ);
if (stop_early >= 0 && rpl > stop_early) { break; }
}
alpha = nextalpha;
}
assert(iter<max_iters); // something is wrong
}
if (!isfinite(rpl)) { rpl = 0.f; }
return rpl;
}
// Ray-trace from source along beamlet central axis, returning path-length
__global__ void cudaRayTrace(
float* rpl, // output of radiologic path lengths for each src/dest pair
float* sources, // array of src coords
float* dests, // array of dest coords
uint npts, // number of dests/rpls
float3 densStart, // coords of volume start
uint3 densSize, // array size
float3 densSpacing, // voxelsize of volume
cudaTextureObject_t texDens, // volume texture with tri-linear interpolation
float stop_early=-1.f
) {
uint tid = threadIdx.y + blockIdx.x * blockDim.y;
if (tid < npts) {
// for each voxel, the ray begins at the beam source (point source assumption)
// the ray ends at the center of current voxel
float3 source = {sources[3*tid], sources[3*tid + 1], sources[3*tid + 2]};
float3 dest = {dests[3*tid], dests[3*tid + 1], dests[3*tid + 2]};
// radiological path length
rpl[tid] = siddonRPL(
source,
dest,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
}
}
// Ray-trace from common source through center/corners/mid-edges of "detector" element positioned on the detector plane which is centered at isocenter and oriented at some angle
#define subrayidx (threadIdx.x)
#define subraycount (blockDim.x)
#define s_index(ii) threadIdx.y+blockDim.y*(threadIdx.z+blockDim.z*ii)
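// Each detector element is sampled with 9 sub-rays (centre, four corners and four edge
// mid-points, selected by threadIdx.x); the per-sub-ray path lengths are collected in
// shared memory and averaged into the output pixel by the first sub-ray thread.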
__global__ void cudaBeamTrace(
float* rpl,
float sad,
uint2 detDims,
float3 detCenter,
float2 detSpacing,
float2 detPixelSize,
float detAzi, // gantry angle
float detZen, // couch angle
float detAng, // collimator angle
float3 densStart,
uint3 densSize,
float3 densSpacing,
cudaTextureObject_t texDens,
float stop_early=-1.f
) {
// dynamically allocated shared memory
extern __shared__ float s_subrpl[];
s_subrpl[s_index(subrayidx)] = 0;
__syncthreads();
int ray_X = threadIdx.y + blockIdx.y*blockDim.y;
int ray_Z = threadIdx.z + blockIdx.z*blockDim.z;
// threads are launched for each pixel in the 2D output map
// ray_X/ray_Z in Fluence Coord Sys (FCS) which matches RCS before rotation
if (ray_X >= detDims.x || ray_Z >= detDims.y) { return; }
// shift for sub-ray [-1, 0, +1]
int subray_X = (subrayidx % 3) - 1;
int subray_Z = (subrayidx / 3) - 1;
// the end point of each ray is found from the 2D coordinates on the fluence map
// center coord of fluence map is detCenter
// bixel coords defined in ray_X-ray_Z plane (FCS) then rotated with beam angles into RCS
// don't let x/y in int2 scare you, it is really x/z
// we directly define bixel coords in DCS to be consistent with storage of texRay
float2 detsize = make_float2(detDims-1)*detSpacing;
float3 bixel_ctr_FCS = make_float3(
-0.5f*detsize.x + ray_X*detSpacing.x + 0.5f*subray_X*detPixelSize.x,
0,
-0.5f*detsize.y + ray_Z*detSpacing.y + 0.5f*subray_Z*detPixelSize.y
);
float3 bixel_ctr = inverseRotateBeamAtOriginRHS(bixel_ctr_FCS, detAzi, detZen, detAng);
bixel_ctr += detCenter;
float3 source = inverseRotateBeamAtOriginRHS(make_float3(0.f, -sad, 0.f), detAzi, detZen, detAng) + detCenter;
// extend end of raytrace beyond fluence map plane
float3 shortdiff = bixel_ctr - source;
float3 sink = source + 10.f*shortdiff;
s_subrpl[s_index(subrayidx)] = siddonRPL(
source,
sink,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
__syncthreads();
if (subrayidx == 0) {
float f = 0.f;
for (int ii=0; ii<subraycount; ii++) {
f += s_subrpl[s_index(ii)];
}
// write out the fluence map
rpl[ray_X + detDims.x * ray_Z] = f/9.f;
}
}
// C-CALLABLE ENTRY POINT
void raytrace_c(
float* rpl,
const float* sources,
const float* dests,
unsigned int npts,
float* dens,
float3 densStart,
uint3 densSize,
float3 densSpacing,
float stop_early
) {
// allocate cudaArray opaque memory
cudaArray* d_densArr;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaExtent extent = make_cudaExtent(densSize.x, densSize.y, densSize.z);
checkCudaErrors( cudaMalloc3DArray(&d_densArr, &desc, extent) );
// copy to cudaArray
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(dens, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_densArr;
copyParams.kind = cudaMemcpyHostToDevice;
copyParams.extent = extent;
checkCudaErrors( cudaMemcpy3D(©Params) );
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_densArr;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.addressMode[2] = cudaAddressModeBorder;
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t texDens;
checkCudaErrors( cudaCreateTextureObject(&texDens, &resDesc, &texDesc, NULL) );
// allocate device memory
float* d_rpl;
checkCudaErrors( cudaMalloc(&d_rpl, npts*sizeof(float)) );
float* d_sources;
checkCudaErrors( cudaMalloc(&d_sources, npts*sizeof(float3)) );
checkCudaErrors( cudaMemcpy(d_sources, sources, npts*3*sizeof(float), cudaMemcpyHostToDevice) );
float* d_dests;
checkCudaErrors( cudaMalloc(&d_dests, npts*3*sizeof(float)) );
checkCudaErrors( cudaMemcpy(d_dests, dests, npts*3*sizeof(float), cudaMemcpyHostToDevice) );
// setup kernel call params
dim3 rayBlock;
uint threadLimit = 256;
rayBlock = dim3(1, threadLimit, 1);
dim3 rayGrid = dim3((uint)ceilf((float)npts/rayBlock.y), 1, 1);
int sharedMem = rayBlock.x*rayBlock.y*rayBlock.z*sizeof(float);
// std::cout << "rayBlock: ("<<rayBlock.x<<","<<rayBlock.y<<","<<rayBlock.z<<")"<<std::endl;
// std::cout << "rayGrid: ("<<rayGrid.x<<","<<rayGrid.y<<","<<rayGrid.z<<")"<<std::endl;
// std::cout << "raySharedMem: "<<sharedMem << " bytes"<< std::endl;
// call raytracing kernel
cudaRayTrace <<< rayGrid, rayBlock, sharedMem >>>(
d_rpl,
d_sources,
d_dests,
npts,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
cudaDeviceSynchronize();
getLastCudaError("Error during cudaRayTrace");
checkCudaErrors( cudaMemcpy(rpl, d_rpl, npts*sizeof(float), cudaMemcpyDeviceToHost) );
// free memory
checkCudaErrors( cudaFree(d_rpl) );
checkCudaErrors( cudaFree(d_dests) );
checkCudaErrors( cudaDestroyTextureObject(texDens) );
checkCudaErrors( cudaFreeArray(d_densArr) );
}
void beamtrace_c(
float* rpl,
const float sad,
const uint2 detDims,
const float3 detCenter,
const float2 detSpacing,
const float2 detPixelSize,
const float detAzi,
const float detZen,
const float detAng,
const float* dens,
const float3 densStart,
const uint3 densSize,
const float3 densSpacing,
const float stop_early
) {
// allocate cudaArray opaque memory
cudaArray* d_densArr;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaExtent extent = make_cudaExtent(densSize.x, densSize.y, densSize.z);
checkCudaErrors( cudaMalloc3DArray(&d_densArr, &desc, extent) );
// copy to cudaArray
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr((void*)dens, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_densArr;
copyParams.kind = cudaMemcpyHostToDevice;
copyParams.extent = extent;
checkCudaErrors( cudaMemcpy3D(©Params) );
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_densArr;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.addressMode[2] = cudaAddressModeBorder;
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t texDens;
checkCudaErrors( cudaCreateTextureObject(&texDens, &resDesc, &texDesc, NULL) );
//////////////////////////////////////////////////////////////////////////////////////////////
// projection through iso_cntr_matrix to create conformal field map from source
float *d_rpl;
int nbixels = detDims.x * detDims.y;
checkCudaErrors( cudaMalloc( &d_rpl, nbixels*sizeof(float) ) );
checkCudaErrors( cudaMemset( d_rpl, 0, nbixels*sizeof(float) ) );
// use center/corner/edge sampling pattern
dim3 rayGrid;
dim3 rayBlock = dim3{9, 8, 8};
// create a thread for each pixel in a 2D square fluence map array
rayGrid.y = ceilf((float)detDims.x/rayBlock.y);
rayGrid.z = ceilf((float)detDims.y/rayBlock.z);
// call raytracing kernel
size_t sharedMem = rayBlock.x*rayBlock.y*rayBlock.z*sizeof(float);
cudaBeamTrace <<< rayGrid, rayBlock, sharedMem >>>(
d_rpl,
sad,
detDims,
detCenter,
detSpacing,
detPixelSize,
detAzi,
detZen,
detAng,
densStart,
densSize,
densSpacing,
texDens,
stop_early
);
cudaDeviceSynchronize();
getLastCudaError("Error during cudaBeamTrace");
checkCudaErrors( cudaMemcpy(rpl, d_rpl, nbixels*sizeof(float), cudaMemcpyDeviceToHost) );
// free memory
checkCudaErrors( cudaDestroyTextureObject(texDens) );
checkCudaErrors( cudaFreeArray(d_densArr) );
checkCudaErrors( cudaFree(d_rpl) );
}
|
d6949cabde689ab18401a5994c2ef4cee2b78575.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS
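// Assigns every point to a cell of a regular grid: for each dimension d the cell index is
// floor((pos[d] - start[d]) / size[d]), and the per-dimension indices are flattened into a
// single int64 cluster id (dimension 0 varying fastest).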
template <typename scalar_t>
__global__ void grid_kernel(int64_t *cluster,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pos,
scalar_t *__restrict__ size,
scalar_t *__restrict__ start,
scalar_t *__restrict__ end, size_t numel) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
for (ptrdiff_t i = index; i < numel; i += stride) {
int64_t c = 0, k = 1;
for (ptrdiff_t d = 0; d < pos.sizes[1]; d++) {
scalar_t p = pos.data[i * pos.strides[0] + d * pos.strides[1]] - start[d];
c += (int64_t)(p / size[d]) * k;
k *= (int64_t)((end[d] - start[d]) / size[d]) + 1;
}
cluster[i] = c;
}
}
at::Tensor grid_cuda(at::Tensor pos, at::Tensor size, at::Tensor start,
at::Tensor end) {
hipSetDevice(pos.get_device());
auto cluster = at::empty(pos.size(0), pos.options().dtype(at::kLong));
AT_DISPATCH_ALL_TYPES(pos.type(), "grid_kernel", [&] {
hipLaunchKernelGGL(( grid_kernel<scalar_t>), dim3(BLOCKS(cluster.numel())), dim3(THREADS), 0, 0,
cluster.data<int64_t>(),
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(pos),
size.data<scalar_t>(), start.data<scalar_t>(), end.data<scalar_t>(),
cluster.numel());
});
return cluster;
}
| d6949cabde689ab18401a5994c2ef4cee2b78575.cu | #include <ATen/ATen.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS
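// Assigns every point to a cell of a regular grid: for each dimension d the cell index is
// floor((pos[d] - start[d]) / size[d]), and the per-dimension indices are flattened into a
// single int64 cluster id (dimension 0 varying fastest).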
template <typename scalar_t>
__global__ void grid_kernel(int64_t *cluster,
at::cuda::detail::TensorInfo<scalar_t, int64_t> pos,
scalar_t *__restrict__ size,
scalar_t *__restrict__ start,
scalar_t *__restrict__ end, size_t numel) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
for (ptrdiff_t i = index; i < numel; i += stride) {
int64_t c = 0, k = 1;
for (ptrdiff_t d = 0; d < pos.sizes[1]; d++) {
scalar_t p = pos.data[i * pos.strides[0] + d * pos.strides[1]] - start[d];
c += (int64_t)(p / size[d]) * k;
k *= (int64_t)((end[d] - start[d]) / size[d]) + 1;
}
cluster[i] = c;
}
}
at::Tensor grid_cuda(at::Tensor pos, at::Tensor size, at::Tensor start,
at::Tensor end) {
cudaSetDevice(pos.get_device());
auto cluster = at::empty(pos.size(0), pos.options().dtype(at::kLong));
AT_DISPATCH_ALL_TYPES(pos.type(), "grid_kernel", [&] {
grid_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
cluster.data<int64_t>(),
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(pos),
size.data<scalar_t>(), start.data<scalar_t>(), end.data<scalar_t>(),
cluster.numel());
});
return cluster;
}
|
3a5306bed0f2fd6dc21b7d13ac8a1dbb207e8b08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <loops/type_conversions.h>
#include <types/types.h>
namespace nd4j {
template<typename S, typename T>
void TypeCast::convertGenericCuda(Nd4jPointer *extras, void *dx, Nd4jLong N, void *dz) {
auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
hipLaunchKernelGGL(( nd4j::convertKernel<S, T>), dim3(256), dim3(1024), 1024, *stream, dx, N, dz);
};
template<typename S, typename T>
__device__ void convertKernelGeneric(S *x, Nd4jLong N, T *z) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < N; i+= blockDim.x * gridDim.x) {
// although it looks wasteful, casting through float simplifies conversion to lower-precision dtypes
// FIXME: get rid of through-float though
z[i] = static_cast<T>(static_cast<float>(x[i]));
}
};
/*
* PLEASE NOTE: This kernel does not loop over the data; the grid must cover all elements, so it will be huge.
*/
template<typename T>
__device__ inline void encoderKernelP1Generic(void *dx, Nd4jLong N, void *dz, float threshold) {
auto x = reinterpret_cast<T *> (dx);
auto z = reinterpret_cast<int *> (dz);
//basically, for phase one we calculate how many eligible values we have, and which blocks will be holding data
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
int pass = tid < N && nd4j::math::nd4j_abs<T>(x[tid]) >= static_cast<T>(threshold) ? 1 : 0;
int bp=__syncthreads_count(pass);
if (threadIdx.x == 0) {
// saving out per-block passes
z[blockIdx.x+1] = bp;
// saving out sum
atomicAdd(&z[0], bp);
}
}
__device__ __inline__ int pow2i (int e){
return 1<<e;
}
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index) temp[index]
#endif
template <bool isNP2>
__device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
// pad values beyond n with zeros
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2>
__device__ void storeSharedChunkToMem(int* g_odata, int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) {
__syncthreads();
// write results to global memory
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) { // compile-time decision
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) {
if (threadIdx.x == 0)
{
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
__device__ unsigned int buildSum(int *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ void scanRootToLeaves(int *s_data, unsigned int stride) {
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum>
__device__ void prescanBlock(int *data, int blockIndex, int *blockSums) {
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
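// Work-efficient shared-memory exclusive scan of one block: up-sweep (buildSum), clearing
// of the last element (optionally saving the block total), then down-sweep
// (scanRootToLeaves); CONFLICT_FREE_OFFSET padding avoids shared-memory bank conflicts.
// uniformAdd below adds the scanned per-block totals back onto each block's elements.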
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ int s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(reinterpret_cast<int *>(s_data), g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) {
__shared__ float uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
/*
* This kernel does prefix sum in parallel, to calculate offsets for each block
*/
template<typename T>
__device__ inline void encoderKernelP2Generic(void *dx, Nd4jLong n, void *dz) {
// TODO: to be remove
}
/*
* PLEASE NOTE: This kernel does not loop over the data; the grid must cover all elements, so it will be huge.
*
* Based on: https://github.com/knotman90/cuStreamComp <-- efficient CUDA stream compaction algorithm
*/
template<typename T>
__device__ inline void encoderKernelP3Generic(void *dx, int *offsets, Nd4jLong N, void *dz) {
T *x = reinterpret_cast<T *> (dx);
int *z = reinterpret_cast<int *> (dz);
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int warpTotals[];
// fetch block offset only once
__shared__ float threshold;
__shared__ FloatBits fb;
__shared__ int bo;
__shared__ int limit;
if (threadIdx.x == 0) {
limit = z[0];
fb.i_ = z[2];
threshold = fb.f_;
bo = offsets[blockIdx.x];
}
__syncthreads();
if (tid < N) {
T value = x[tid];
int pred = nd4j::math::nd4j_abs<T>(value) >= static_cast<T>(threshold) ? 1 : 0;
int w_i = threadIdx.x/warpSize; //warp index
int w_l = tid % warpSize;//thread index within a warp
int t_m = INT_MAX >> (warpSize-w_l-1); //thread mask (ERROR IN THE PAPER minus one is required)
int b = __ballot(pred) & t_m; // ballot result: the ith bit is one if the ith thread's pred is true, masked up to the current index in the warp
int t_u = __popc(b); // popc counts the set bits, i.e. the number of threads predicated true BEFORE this index
if(w_l==warpSize-1){
warpTotals[w_i]=t_u+pred;
}
__syncthreads();
if(w_i==0 && w_l<blockDim.x/warpSize){
int w_i_u=0;
for(int j=0;j<=5;j++){
int b_j =__ballot( warpTotals[w_l] & pow2i(j) ); //# of the ones in the j'th digit of the warp offsets
w_i_u += (__popc(b_j & t_m) ) << j;
//printf("indice %i t_m=%i,j=%i,b_j=%i,w_i_u=%i\n",w_l,t_m,j,b_j,w_i_u);
}
warpTotals[w_l]=w_i_u;
}
__syncthreads();
if(pred){
int idx = t_u + warpTotals[w_i] + bo + 4;
if (idx < limit + 4) {
z[idx]= value > static_cast<T>(0.0f) ? tid+1 : -(tid + 1);
x[tid] = value > static_cast<T>(0.0f) ? x[tid] - threshold : x[tid] + threshold;
}
}
}
}
/*
* This kernel handles decode from sparse threshold array, to dense array
*
* PLEASE NOTE: Z is expected to be memset to 0
*/
template<typename T>
__device__ inline void decoderKernelGeneric(void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<int *> (dx);
auto z = reinterpret_cast<T *> (dz);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float threshold;
__shared__ int limit;
__shared__ FloatBits fb;
if (threadIdx.x == 0) {
limit = x[0];
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
for (int e = tid; e < limit; e += blockDim.x * gridDim.x) {
int el = x[e+4];
int ael = nd4j::math::nd4j_abs<int>(el) - 1;
// TODO: investigate, if += would work better here, as in "decoded accumulation"
z[ael] += el > 0 ? threshold : -threshold;
}
}
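// Bitmap encoding used by the two kernels below: every group of 16 values is packed into
// one int. Bit i of the lower half marks |x_i| >= threshold (a full +/- threshold step),
// bit i of the upper half marks a negative sign; an upper-half bit without the matching
// lower bit encodes a negative half-threshold step. The encoder keeps the residual in dx
// so the quantisation error is carried over to the next round.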
template<typename T>
__device__ inline void cudaDecodeBitmapGeneric(void *dx, Nd4jLong N, T *dz) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ T *shmem;
__shared__ FloatBits fb;
__shared__ float threshold;
__shared__ int *x;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<T*>(mem);
x = reinterpret_cast<int *>(dx);
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
int lim = N / 16 + 5;
for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
int byteId = i / 16 + 4;
// printf("I: [%i]; byteId: [%i]\n", i, byteId);
shmem[threadIdx.x] = dz[i];
__syncthreads();
if (threadIdx.x % 16 == 0) {
int byte = x[byteId];
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
int bitId = (i + e) % 16;
bool hasBit = (byte & 1 << (bitId) ) != 0;
bool hasSign = (byte & 1 << (bitId + 16) ) != 0;
if (hasBit) {
if (hasSign)
shmem[threadIdx.x + bitId] -= threshold;
else
shmem[threadIdx.x + bitId] += threshold;
} else if (hasSign) {
shmem[threadIdx.x + bitId] -= threshold / 2;
}
}
}
__syncthreads();
dz[i] = shmem[threadIdx.x];
}
}
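// Bitmap format shared by the encode/decode kernels: each int of the payload (which
// starts at index 4, hence byteId = i / 16 + 4) packs 16 values. Bit b set means
// |x| >= threshold; bit b + 16 carries the sign, and when set alone it marks a negative
// value with threshold/2 <= |x| < threshold that only receives half the threshold.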
template<typename T>
__device__ inline void cudaEncodeBitmapGeneric(T *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int counter;
__shared__ int *shmem;
__shared__ T *vals;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<int*>(mem);
vals = reinterpret_cast<T *>(shmem + blockDim.x);
counter = 0;
}
__syncthreads();
for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
// all threads in block reading stuff
T val = dx[i];
T abs = nd4j::math::nd4j_abs<T>(val);
int byteId = i / 16 + 4;
int bitId = i % 16;
shmem[threadIdx.x] = 0;
vals[threadIdx.x] = val;
if (abs >= static_cast<T>(threshold)) {
shmem[threadIdx.x] = 1 << (bitId);
atomicAdd(&counter, 1);
if (val < static_cast<T>(0.0f)) {
shmem[threadIdx.x] |= 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold);
} else {
vals[threadIdx.x] -= static_cast<T>(threshold);
}
} else if (abs >= static_cast<T>(threshold) / static_cast<T>(2.0f) && val < static_cast<T>(0.0f)) {
atomicAdd(&counter, 1);
shmem[threadIdx.x] = 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold) / static_cast<T>(2.0f);
}
__syncthreads();
if (threadIdx.x % 16 == 0) {
int byte = 0;
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
byte |= shmem[threadIdx.x + e];
}
dz[byteId] = byte;
}
__syncthreads();
dx[i] = vals[threadIdx.x];
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(scalar, counter);
}
}
__global__ void cudaEncodeBitmapFloat(float *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
nd4j::cudaEncodeBitmapGeneric<float>(dx, N, dz, scalar, reductionBuffer, threshold);
}
__global__ void cudaEncodeBitmapDouble(double *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
nd4j::cudaEncodeBitmapGeneric<double>(dx, N, dz, scalar, reductionBuffer, threshold);
}
__global__ void cudaEncodeBitmapHalf(float16 *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
nd4j::cudaEncodeBitmapGeneric<float16>(dx, N, dz, scalar, reductionBuffer, threshold);
}
__global__ void cudaDecodeBitmapFloat(void *dx, Nd4jLong N, float *dz) {
nd4j::cudaDecodeBitmapGeneric<float>(dx, N, dz);
}
__global__ void cudaDecodeBitmapDouble(void *dx, Nd4jLong N, double *dz) {
nd4j::cudaDecodeBitmapGeneric<double>(dx, N, dz);
}
__global__ void cudaDecodeBitmapHalf(void *dx, Nd4jLong N, float16 *dz) {
nd4j::cudaDecodeBitmapGeneric<float16>(dx, N, dz);
}
__global__ void encoderKernelP1Float(void *dx, Nd4jLong N, void *dz, float threshold) {
nd4j::encoderKernelP1Generic<float>(dx, N, dz, threshold);
}
__global__ void encoderKernelP1Double(void *dx, Nd4jLong N, void *dz, float threshold) {
nd4j::encoderKernelP1Generic<double>(dx, N, dz, threshold);
}
__global__ void encoderKernelP1Half(void *dx, Nd4jLong N, void *dz, float threshold) {
nd4j::encoderKernelP1Generic<float16>(dx, N, dz, threshold);
}
__global__ void encoderKernelP2Float(int *dx, Nd4jLong N, int *dz) {
nd4j::encoderKernelP2Generic<float>(dx, N, dz);
}
__global__ void encoderKernelP3Float(void *dx, int *offsets, Nd4jLong N, void *dz) {
nd4j::encoderKernelP3Generic<float>(dx, offsets, N, dz);
}
__global__ void encoderKernelP3Double(void *dx, int *offsets, Nd4jLong N, void *dz) {
nd4j::encoderKernelP3Generic<double>(dx, offsets, N, dz);
}
__global__ void encoderKernelP3Half(void *dx, int *offsets, Nd4jLong N, void *dz) {
nd4j::encoderKernelP3Generic<float16>(dx, offsets, N, dz);
}
__global__ void decoderKernelFloat(void *dx, Nd4jLong N, void *dz) {
nd4j::decoderKernelGeneric<float>(dx, N, dz);
}
__global__ void decoderKernelDouble(void *dx, Nd4jLong N, void *dz) {
nd4j::decoderKernelGeneric<double>(dx, N, dz);
}
__global__ void decoderKernelHalf(void *dx, Nd4jLong N, void *dz) {
nd4j::decoderKernelGeneric<float16>(dx, N, dz);
}
template <bool storeSum, bool isNP2>
__host__ void prescanLauncher(dim3 &blocks, dim3 &threads, int shmem, hipStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
hipLaunchKernelGGL(( prescan<storeSum, isNP2>), dim3(blocks), dim3(threads), shmem, *stream, g_odata, g_idata, g_blockSums, n, blockIndex, baseIndex);
};
template <typename S, typename T>
__global__ void convertKernel(void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<S *>(dx);
auto z = reinterpret_cast<T *>(dz);
nd4j::convertKernelGeneric(x, N, z);
}
#define LIBND4J_BOOLS \
0, \
1
BUILD_DOUBLE_TEMPLATE(template void TypeCast::convertGenericCuda, (Nd4jPointer * extras, void *dx, Nd4jLong N, void *dz), LIBND4J_TYPES, LIBND4J_TYPES)
BUILD_DOUBLE_TEMPLATE(template void prescanLauncher, (dim3 &blocks, dim3 &threads, int shmem, hipStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex), LIBND4J_BOOLS, LIBND4J_BOOLS)
#undef LIBND4J_BOOLS
} | 3a5306bed0f2fd6dc21b7d13ac8a1dbb207e8b08.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <loops/type_conversions.h>
#include <types/types.h>
namespace nd4j {
template<typename S, typename T>
void TypeCast::convertGenericCuda(Nd4jPointer *extras, void *dx, Nd4jLong N, void *dz) {
auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
nd4j::convertKernel<S, T><<<256, 1024, 1024, *stream>>>(dx, N, dz);
};
template<typename S, typename T>
__device__ void convertKernelGeneric(S *x, Nd4jLong N, T *z) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < N; i+= blockDim.x * gridDim.x) {
// despite it's stupid, it simplifies conversion to bottom dtypes
// FIXME: get rid of through-float though
z[i] = static_cast<T>(static_cast<float>(x[i]));
}
};
/*
* PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
*/
template<typename T>
__device__ inline void encoderKernelP1Generic(void *dx, Nd4jLong N, void *dz, float threshold) {
auto x = reinterpret_cast<T *> (dx);
auto z = reinterpret_cast<int *> (dz);
//basically, for phase One we want do calculation: how many eligible values we have, and which blocks will be holding data
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
int pass = tid < N && nd4j::math::nd4j_abs<T>(x[tid]) >= static_cast<T>(threshold) ? 1 : 0;
int bp=__syncthreads_count(pass);
if (threadIdx.x == 0) {
// saving out per-block passes
z[blockIdx.x+1] = bp;
// saving out sum
atomicAdd(&z[0], bp);
}
}
__device__ __inline__ int pow2i (int e){
return 1<<e;
}
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index) temp[index]
#endif
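// The helpers below implement a work-efficient shared-memory scan: buildSum is the
// up-sweep, scanRootToLeaves the down-sweep, and CONFLICT_FREE_OFFSET pads indices to
// avoid shared-memory bank conflicts. The structure appears to follow the classic CUDA
// prefix-sum (prescan) example.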
template <bool isNP2>
__device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
// pad values beyond n with zeros
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2>
__device__ void storeSharedChunkToMem(int* g_odata, int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) {
__syncthreads();
// write results to global memory
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) { // compile-time decision
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) {
if (threadIdx.x == 0)
{
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
__device__ unsigned int buildSum(int *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ void scanRootToLeaves(int *s_data, unsigned int stride) {
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum>
__device__ void prescanBlock(int *data, int blockIndex, int *blockSums) {
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ int s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(reinterpret_cast<int *>(s_data), g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
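// Adds a per-block offset (uniforms[blockIdx.x + blockOffset]) onto every element of that
// block's partial scan; as in prescan, each thread updates two elements.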
__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) {
__shared__ float uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
/*
* This kernel does prefix sum in parallel, to calculate offsets for each block
*/
template<typename T>
__device__ inline void encoderKernelP2Generic(void *dx, Nd4jLong n, void *dz) {
// TODO: to be removed
}
/*
* PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
*
* Based on: https://github.com/knotman90/cuStreamComp <-- efficient CUDA stream compaction algorithm
*/
template<typename T>
__device__ inline void encoderKernelP3Generic(void *dx, int *offsets, Nd4jLong N, void *dz) {
T *x = reinterpret_cast<T *> (dx);
int *z = reinterpret_cast<int *> (dz);
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int warpTotals[];
// fetch block offset only once
__shared__ float threshold;
__shared__ FloatBits fb;
__shared__ int bo;
__shared__ int limit;
if (threadIdx.x == 0) {
limit = z[0];
fb.i_ = z[2];
threshold = fb.f_;
bo = offsets[blockIdx.x];
}
__syncthreads();
if (tid < N) {
T value = x[tid];
int pred = nd4j::math::nd4j_abs<T>(value) >= static_cast<T>(threshold) ? 1 : 0;
int w_i = threadIdx.x/warpSize; //warp index
int w_l = tid % warpSize;//thread index within a warp
int t_m = INT_MAX >> (warpSize-w_l-1); //thread mask (ERROR IN THE PAPER minus one is required)
int b = __ballot(pred) & t_m; // balres = number whose ith bit is one if the ith thread's pred is true, masked up to the current index in the warp
int t_u = __popc(b); // popc counts the set bits, i.e. the number of threads predicated true BEFORE MY INDEX
if(w_l==warpSize-1){
warpTotals[w_i]=t_u+pred;
}
__syncthreads();
if(w_i==0 && w_l<blockDim.x/warpSize){
int w_i_u=0;
for(int j=0;j<=5;j++){
int b_j =__ballot( warpTotals[w_l] & pow2i(j) ); //# of the ones in the j'th digit of the warp offsets
w_i_u += (__popc(b_j & t_m) ) << j;
//printf("indice %i t_m=%i,j=%i,b_j=%i,w_i_u=%i\n",w_l,t_m,j,b_j,w_i_u);
}
warpTotals[w_l]=w_i_u;
}
__syncthreads();
if(pred){
int idx = t_u + warpTotals[w_i] + bo + 4;
if (idx < limit + 4) {
z[idx]= value > static_cast<T>(0.0f) ? tid+1 : -(tid + 1);
x[tid] = value > static_cast<T>(0.0f) ? x[tid] - threshold : x[tid] + threshold;
}
}
}
}
/*
* This kernel handles decode from sparse threshold array, to dense array
*
* PLEASE NOTE: Z is expected to be memset to 0
*/
template<typename T>
__device__ inline void decoderKernelGeneric(void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<int *> (dx);
auto z = reinterpret_cast<T *> (dz);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float threshold;
__shared__ int limit;
__shared__ FloatBits fb;
if (threadIdx.x == 0) {
limit = x[0];
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
for (int e = tid; e < limit; e += blockDim.x * gridDim.x) {
int el = x[e+4];
int ael = nd4j::math::nd4j_abs<int>(el) - 1;
// TODO: investigate, if += would work better here, as in "decoded accumulation"
z[ael] += el > 0 ? threshold : -threshold;
}
}
template<typename T>
__device__ inline void cudaDecodeBitmapGeneric(void *dx, Nd4jLong N, T *dz) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ T *shmem;
__shared__ FloatBits fb;
__shared__ float threshold;
__shared__ int *x;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<T*>(mem);
x = reinterpret_cast<int *>(dx);
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
int lim = N / 16 + 5;
for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
int byteId = i / 16 + 4;
// printf("I: [%i]; byteId: [%i]\n", i, byteId);
shmem[threadIdx.x] = dz[i];
__syncthreads();
if (threadIdx.x % 16 == 0) {
int byte = x[byteId];
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
int bitId = (i + e) % 16;
bool hasBit = (byte & 1 << (bitId) ) != 0;
bool hasSign = (byte & 1 << (bitId + 16) ) != 0;
if (hasBit) {
if (hasSign)
shmem[threadIdx.x + bitId] -= threshold;
else
shmem[threadIdx.x + bitId] += threshold;
} else if (hasSign) {
shmem[threadIdx.x + bitId] -= threshold / 2;
}
}
}
__syncthreads();
dz[i] = shmem[threadIdx.x];
}
}
template<typename T>
__device__ inline void cudaEncodeBitmapGeneric(T *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int counter;
__shared__ int *shmem;
__shared__ T *vals;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<int*>(mem);
vals = reinterpret_cast<T *>(shmem + blockDim.x);
counter = 0;
}
__syncthreads();
for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
// all threads in block reading stuff
T val = dx[i];
T abs = nd4j::math::nd4j_abs<T>(val);
int byteId = i / 16 + 4;
int bitId = i % 16;
shmem[threadIdx.x] = 0;
vals[threadIdx.x] = val;
if (abs >= static_cast<T>(threshold)) {
shmem[threadIdx.x] = 1 << (bitId);
atomicAdd(&counter, 1);
if (val < static_cast<T>(0.0f)) {
shmem[threadIdx.x] |= 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold);
} else {
vals[threadIdx.x] -= static_cast<T>(threshold);
}
} else if (abs >= static_cast<T>(threshold) / static_cast<T>(2.0f) && val < static_cast<T>(0.0f)) {
atomicAdd(&counter, 1);
shmem[threadIdx.x] = 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold) / static_cast<T>(2.0f);
}
__syncthreads();
if (threadIdx.x % 16 == 0) {
int byte = 0;
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
byte |= shmem[threadIdx.x + e];
}
dz[byteId] = byte;
}
__syncthreads();
dx[i] = vals[threadIdx.x];
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(scalar, counter);
}
}
__global__ void cudaEncodeBitmapFloat(float *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
nd4j::cudaEncodeBitmapGeneric<float>(dx, N, dz, scalar, reductionBuffer, threshold);
}
__global__ void cudaEncodeBitmapDouble(double *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
nd4j::cudaEncodeBitmapGeneric<double>(dx, N, dz, scalar, reductionBuffer, threshold);
}
__global__ void cudaEncodeBitmapHalf(float16 *dx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
nd4j::cudaEncodeBitmapGeneric<float16>(dx, N, dz, scalar, reductionBuffer, threshold);
}
__global__ void cudaDecodeBitmapFloat(void *dx, Nd4jLong N, float *dz) {
nd4j::cudaDecodeBitmapGeneric<float>(dx, N, dz);
}
__global__ void cudaDecodeBitmapDouble(void *dx, Nd4jLong N, double *dz) {
nd4j::cudaDecodeBitmapGeneric<double>(dx, N, dz);
}
__global__ void cudaDecodeBitmapHalf(void *dx, Nd4jLong N, float16 *dz) {
nd4j::cudaDecodeBitmapGeneric<float16>(dx, N, dz);
}
__global__ void encoderKernelP1Float(void *dx, Nd4jLong N, void *dz, float threshold) {
nd4j::encoderKernelP1Generic<float>(dx, N, dz, threshold);
}
__global__ void encoderKernelP1Double(void *dx, Nd4jLong N, void *dz, float threshold) {
nd4j::encoderKernelP1Generic<double>(dx, N, dz, threshold);
}
__global__ void encoderKernelP1Half(void *dx, Nd4jLong N, void *dz, float threshold) {
nd4j::encoderKernelP1Generic<float16>(dx, N, dz, threshold);
}
__global__ void encoderKernelP2Float(int *dx, Nd4jLong N, int *dz) {
nd4j::encoderKernelP2Generic<float>(dx, N, dz);
}
__global__ void encoderKernelP3Float(void *dx, int *offsets, Nd4jLong N, void *dz) {
nd4j::encoderKernelP3Generic<float>(dx, offsets, N, dz);
}
__global__ void encoderKernelP3Double(void *dx, int *offsets, Nd4jLong N, void *dz) {
nd4j::encoderKernelP3Generic<double>(dx, offsets, N, dz);
}
__global__ void encoderKernelP3Half(void *dx, int *offsets, Nd4jLong N, void *dz) {
nd4j::encoderKernelP3Generic<float16>(dx, offsets, N, dz);
}
__global__ void decoderKernelFloat(void *dx, Nd4jLong N, void *dz) {
nd4j::decoderKernelGeneric<float>(dx, N, dz);
}
__global__ void decoderKernelDouble(void *dx, Nd4jLong N, void *dz) {
nd4j::decoderKernelGeneric<double>(dx, N, dz);
}
__global__ void decoderKernelHalf(void *dx, Nd4jLong N, void *dz) {
nd4j::decoderKernelGeneric<float16>(dx, N, dz);
}
template <bool storeSum, bool isNP2>
__host__ void prescanLauncher(dim3 &blocks, dim3 &threads, int shmem, cudaStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
prescan<storeSum, isNP2><<<blocks, threads, shmem, *stream>>>(g_odata, g_idata, g_blockSums, n, blockIndex, baseIndex);
};
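// Illustrative call with hypothetical arguments, matching the signature above:
// prescanLauncher<true, false>(blocks, threads, shmemBytes, &stream,
//                              d_odata, d_idata, d_blockSums, n, /*blockIndex=*/0, /*baseIndex=*/0);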
template <typename S, typename T>
__global__ void convertKernel(void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<S *>(dx);
auto z = reinterpret_cast<T *>(dz);
nd4j::convertKernelGeneric(x, N, z);
}
#define LIBND4J_BOOLS \
0, \
1
BUILD_DOUBLE_TEMPLATE(template void TypeCast::convertGenericCuda, (Nd4jPointer * extras, void *dx, Nd4jLong N, void *dz), LIBND4J_TYPES, LIBND4J_TYPES)
BUILD_DOUBLE_TEMPLATE(template void prescanLauncher, (dim3 &blocks, dim3 &threads, int shmem, cudaStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex), LIBND4J_BOOLS, LIBND4J_BOOLS)
#undef LIBND4J_BOOLS
} |
b76345531a36457a002430f14d3220bb159669de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THGeneral.h"
#include "THHGeneral.h"
#include "THHTensor.h"
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
static void THCudaTensor_computesz(THCudaTensor *self, long **sz_, long **st_)
{
long *sz, *st, *szh;
int i;
THCudaCheck(hipMalloc(&sz, sizeof(long)*self->nDimension));
THCudaCheck(hipMalloc(&st, sizeof(long)*self->nDimension));
szh = (long*)THAlloc(sizeof(long)*self->nDimension);
for(i = self->nDimension-1; i >= 0; i--)
{
if(i == self->nDimension-1)
szh[i] = 1;
else
szh[i] = szh[i+1]*self->size[i+1];
}
THCudaCheck(hipMemcpy(sz, szh, self->nDimension * sizeof(long), hipMemcpyHostToDevice));
THCudaCheck(hipMemcpy(st, self->stride, self->nDimension * sizeof(long), hipMemcpyHostToDevice));
THFree(szh);
*sz_ = sz;
*st_ = st;
}
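// Each (block, threadIdx.y) pair owns one contiguous run of `innerdim` elements; the
// flat position k * innerdim is decomposed into source and destination offsets using the
// size/stride arrays built by THCudaTensor_computesz, and threadIdx.x strides across the
// innermost dimension.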
__global__ void THCudaTensor_kernel_copy(float *dst,
long *dst_sz, long *dst_st, int dst_dim,
float *src,
long *src_sz, long *src_st, int src_dim,
long n_elem, long innerdim)
{
long k = (blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x)*blockDim.y + threadIdx.y;
long i_start = threadIdx.x * src_st[src_dim-1];
long i_step = blockDim.x * src_st[src_dim-1];
long o_start = threadIdx.x * dst_st[dst_dim-1];
long o_step = blockDim.x * dst_st[dst_dim-1];
long o_end = innerdim * dst_st[dst_dim-1];
if ( ((k+1) * innerdim) <= n_elem) // too safe
{
long dst_idx = 0;
long dst_rest = k * innerdim;
for(int dim = 0; dim < dst_dim; dim++)
{
dst_idx += (dst_rest/dst_sz[dim])*dst_st[dim];
dst_rest = dst_rest % dst_sz[dim];
}
long src_idx = 0;
long src_rest = k * innerdim;
for(int dim = 0; dim < src_dim; dim++)
{
src_idx += (src_rest/src_sz[dim])*src_st[dim];
src_rest = src_rest % src_sz[dim];
}
for (int i=i_start, o=o_start; o<o_end; i+=i_step, o+=o_step) {
dst[dst_idx + o] = src[src_idx + i];
}
}
}
THC_API void THCudaTensor_copy(THCudaTensor *self, THCudaTensor *src)
{
THArgCheck(THCudaTensor_nElement(self) == THCudaTensor_nElement(src), 2, "sizes do not match");
if(THCudaTensor_isContiguous(self) && THCudaTensor_isContiguous(src))
THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src->storage->data + src->storageOffset, THCudaTensor_nElement(src) * sizeof(float), hipMemcpyDeviceToDevice));
else
{
long *d_self_sz, *d_self_st, *d_src_sz, *d_src_st;
long size = THCudaTensor_nElement(self);
long ndims = self->nDimension;
long innermostdim = self->size[ndims-1];
THCudaTensor_computesz(self, &d_self_sz, &d_self_st);
THCudaTensor_computesz(src, &d_src_sz, &d_src_st);
dim3 threads(16,16);
int nblocks = ceil((float)size / (16 * innermostdim ));
// if nblocks greater than 65535 then we need to open a second dimension
#define __MAX_NUM_BLOCKS_PER_GRID_DIM__ 65535
/* The configuration below can deal with Tensors
* of size up to 65535 * 65535 * 65535 * 16 elements.
*/
int nblocks_x = (nblocks > __MAX_NUM_BLOCKS_PER_GRID_DIM__) ? __MAX_NUM_BLOCKS_PER_GRID_DIM__ : nblocks;
int number_blocks_dim_x = DIVUP(nblocks, nblocks_x);
int nblocks_y = (number_blocks_dim_x > __MAX_NUM_BLOCKS_PER_GRID_DIM__) ? __MAX_NUM_BLOCKS_PER_GRID_DIM__ : number_blocks_dim_x;
int number_blocks_dim_y = DIVUP(nblocks, nblocks_x * nblocks_y);
int nblocks_z = number_blocks_dim_y;
dim3 grid(nblocks_x, nblocks_y, nblocks_z);
hipLaunchKernelGGL(( THCudaTensor_kernel_copy), dim3(grid), dim3(threads), 0, 0, THCudaTensor_data(self),
d_self_sz, d_self_st, ndims,
THCudaTensor_data(src),
d_src_sz, d_src_st, src->nDimension,
size, innermostdim);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCudaCheck(hipFree(d_self_sz));
THCudaCheck(hipFree(d_self_st));
THCudaCheck(hipFree(d_src_sz));
THCudaCheck(hipFree(d_src_st));
}
}
| b76345531a36457a002430f14d3220bb159669de.cu | #include "THGeneral.h"
#include "THCGeneral.h"
#include "THCTensor.h"
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
static void THCudaTensor_computesz(THCudaTensor *self, long **sz_, long **st_)
{
long *sz, *st, *szh;
int i;
THCudaCheck(cudaMalloc(&sz, sizeof(long)*self->nDimension));
THCudaCheck(cudaMalloc(&st, sizeof(long)*self->nDimension));
szh = (long*)THAlloc(sizeof(long)*self->nDimension);
for(i = self->nDimension-1; i >= 0; i--)
{
if(i == self->nDimension-1)
szh[i] = 1;
else
szh[i] = szh[i+1]*self->size[i+1];
}
THCudaCheck(cudaMemcpy(sz, szh, self->nDimension * sizeof(long), cudaMemcpyHostToDevice));
THCudaCheck(cudaMemcpy(st, self->stride, self->nDimension * sizeof(long), cudaMemcpyHostToDevice));
THFree(szh);
*sz_ = sz;
*st_ = st;
}
__global__ void THCudaTensor_kernel_copy(float *dst,
long *dst_sz, long *dst_st, int dst_dim,
float *src,
long *src_sz, long *src_st, int src_dim,
long n_elem, long innerdim)
{
long k = (blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x)*blockDim.y + threadIdx.y;
long i_start = threadIdx.x * src_st[src_dim-1];
long i_step = blockDim.x * src_st[src_dim-1];
long o_start = threadIdx.x * dst_st[dst_dim-1];
long o_step = blockDim.x * dst_st[dst_dim-1];
long o_end = innerdim * dst_st[dst_dim-1];
if ( ((k+1) * innerdim) <= n_elem) // too safe
{
long dst_idx = 0;
long dst_rest = k * innerdim;
for(int dim = 0; dim < dst_dim; dim++)
{
dst_idx += (dst_rest/dst_sz[dim])*dst_st[dim];
dst_rest = dst_rest % dst_sz[dim];
}
long src_idx = 0;
long src_rest = k * innerdim;
for(int dim = 0; dim < src_dim; dim++)
{
src_idx += (src_rest/src_sz[dim])*src_st[dim];
src_rest = src_rest % src_sz[dim];
}
for (int i=i_start, o=o_start; o<o_end; i+=i_step, o+=o_step) {
dst[dst_idx + o] = src[src_idx + i];
}
}
}
THC_API void THCudaTensor_copy(THCudaTensor *self, THCudaTensor *src)
{
THArgCheck(THCudaTensor_nElement(self) == THCudaTensor_nElement(src), 2, "sizes do not match");
if(THCudaTensor_isContiguous(self) && THCudaTensor_isContiguous(src))
THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src->storage->data + src->storageOffset, THCudaTensor_nElement(src) * sizeof(float), cudaMemcpyDeviceToDevice));
else
{
long *d_self_sz, *d_self_st, *d_src_sz, *d_src_st;
long size = THCudaTensor_nElement(self);
long ndims = self->nDimension;
long innermostdim = self->size[ndims-1];
THCudaTensor_computesz(self, &d_self_sz, &d_self_st);
THCudaTensor_computesz(src, &d_src_sz, &d_src_st);
dim3 threads(16,16);
int nblocks = ceil((float)size / (16 * innermostdim ));
// if nblocks greater than 65535 then we need to open a second dimension
#define __MAX_NUM_BLOCKS_PER_GRID_DIM__ 65535
/* The configuration below can deal with Tensors
* of size up to 65535 * 65535 * 65535 * 16 elements.
*/
int nblocks_x = (nblocks > __MAX_NUM_BLOCKS_PER_GRID_DIM__) ? __MAX_NUM_BLOCKS_PER_GRID_DIM__ : nblocks;
int number_blocks_dim_x = DIVUP(nblocks, nblocks_x);
int nblocks_y = (number_blocks_dim_x > __MAX_NUM_BLOCKS_PER_GRID_DIM__) ? __MAX_NUM_BLOCKS_PER_GRID_DIM__ : number_blocks_dim_x;
int number_blocks_dim_y = DIVUP(nblocks, nblocks_x * nblocks_y);
int nblocks_z = number_blocks_dim_y;
dim3 grid(nblocks_x, nblocks_y, nblocks_z);
THCudaTensor_kernel_copy<<<grid, threads>>>(THCudaTensor_data(self),
d_self_sz, d_self_st, ndims,
THCudaTensor_data(src),
d_src_sz, d_src_st, src->nDimension,
size, innermostdim);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCudaCheck(cudaFree(d_self_sz));
THCudaCheck(cudaFree(d_self_st));
THCudaCheck(cudaFree(d_src_sz));
THCudaCheck(cudaFree(d_src_st));
}
}
|
2d8146927caae8b3b35bc02a089d031de9a2adae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <time.h>
#define LENGTH 256
using namespace std;
struct soa{
int *a;
int *b;
int *c;
};
__global__ void vector_add(int *a, int *b, int *c){
int i = blockIdx.x * blockDim.x + threadIdx.x; // global index, so every block covers its own slice of LENGTH
if (i < LENGTH)
c[i] = a[i] + b[i]; // read
}
__host__ void vector_add_cpu(float a[], float b[], float *c){
for(int i=0 ; i< LENGTH ; i++){
c[i] = a[i] + b[i];
// std::cout << c[i] << std::endl;
}
}
int main(){
soa h_s;
int *d_s, *d_a, *d_b;
h_s.a = new int [LENGTH];
h_s.b = new int [LENGTH];
h_s.c = new int [LENGTH];
for(int i=0 ; i< LENGTH; i++){
h_s.a[i] = i;
h_s.b[i] = i;
}
hipMalloc((void**)&d_s, LENGTH*sizeof(int));
hipMalloc((void**)&d_a, LENGTH*sizeof(int));
hipMalloc((void**)&d_b, LENGTH*sizeof(int));
hipMemcpy(d_a, h_s.a, LENGTH*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_s.b, LENGTH*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_s, h_s.c, LENGTH*sizeof(int), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds = 0;
float total_time = 0.0;
// for(int k=0 ; k< 1000 ; k++){
// hipEventRecord(start);
hipLaunchKernelGGL(( vector_add), dim3(LENGTH/128), dim3(128), 0, 0, d_a, d_b, d_s);
hipDeviceSynchronize();
// hipEventRecord(stop);
hipMemcpy(h_s.c, d_s, LENGTH*sizeof(int), hipMemcpyDeviceToHost);
// hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
total_time += milliseconds;
// }
std::cout << "Time taken : " << milliseconds << " Avg time : " << total_time / 1000 << std::endl;
for(int i=0; i<10 ;i++){
cout << h_s.c[i] << endl;
}
}
| 2d8146927caae8b3b35bc02a089d031de9a2adae.cu | #include <iostream>
#include <stdio.h>
#include <time.h>
#define LENGTH 256
using namespace std;
struct soa{
int *a;
int *b;
int *c;
};
__global__ void vector_add(int *a, int *b, int *c){
int i = blockIdx.x * blockDim.x + threadIdx.x; // global index, so every block covers its own slice of LENGTH
if (i < LENGTH)
c[i] = a[i] + b[i]; // read
}
__host__ void vector_add_cpu(float a[], float b[], float *c){
for(int i=0 ; i< LENGTH ; i++){
c[i] = a[i] + b[i];
// std::cout << c[i] << std::endl;
}
}
int main(){
soa h_s;
int *d_s, *d_a, *d_b;
h_s.a = new int [LENGTH];
h_s.b = new int [LENGTH];
h_s.c = new int [LENGTH];
for(int i=0 ; i< LENGTH; i++){
h_s.a[i] = i;
h_s.b[i] = i;
}
cudaMalloc((void**)&d_s, LENGTH*sizeof(int));
cudaMalloc((void**)&d_a, LENGTH*sizeof(int));
cudaMalloc((void**)&d_b, LENGTH*sizeof(int));
cudaMemcpy(d_a, h_s.a, LENGTH*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_s.b, LENGTH*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_s, h_s.c, LENGTH*sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
float total_time = 0.0;
// for(int k=0 ; k< 1000 ; k++){
// cudaEventRecord(start);
vector_add<<<LENGTH/128, 128>>>(d_a, d_b, d_s);
cudaDeviceSynchronize();
// cudaEventRecord(stop);
cudaMemcpy(h_s.c, d_s, LENGTH*sizeof(int), cudaMemcpyDeviceToHost);
// cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
total_time += milliseconds;
// }
std::cout << "Time taken : " << milliseconds << " Avg time : " << total_time / 1000 << std::endl;
for(int i=0; i<10 ;i++){
cout << h_s.c[i] << endl;
}
}
|
86c4b5fb86d2ee917933d1f410c50a4d4491ec89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/system/hip/detail/par.h>
#include <thrust/version.h>
#include "math.h"
#include "context_gpu.h"
static_assert(THRUST_VERSION >= 100800,
"kurff requires a thrust version > 1.8.");
namespace kurff {
namespace math {
// TODO(Yangqing): Yuck again. Maybe change it to templated functors?
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Exp, exp)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Log, log)
__device__ float cuda_sqrf(const float x) { return x * x; }
__device__ double cuda_sqr(const double x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sqr, cuda_sqr)
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(a[i], b[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), \
N, a, b, y); \
}
#define CAFFE_MATH_CUDA_ADD(x, y) (x + y)
#define CAFFE_MATH_CUDA_SUB(x, y) (x - y)
#define CAFFE_MATH_CUDA_MUL(x, y) (x * y)
#define CAFFE_MATH_CUDA_DIV(x, y) (x / y)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, CAFFE_MATH_CUDA_DIV)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, CAFFE_MATH_CUDA_DIV)
/*
#define CAFFE2_SPECIALIZED_ROWWISEMAX(T) \
template <> \
void RowwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
for (int i = 0; i < N; ++i) { \
y[i] = x[i*D]; \
for (int j = 1; j < D; ++j) { \
y[i] = ::max(y[i], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_ROWWISEMAX(float)
#define CAFFE2_SPECIALIZED_COLWISEMAX(T) \
template <> \
void ColwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
memcpy(y, x, sizeof(T) * D); \
for (int i = 1; i < N; ++i) { \
for (int j = 0; j < D; ++j) { \
y[j] = ::max(y[j], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_COLWISEMAX(float)
*/
namespace {
template<typename T>
__global__ void AddToRowKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % N];
}
}
template<typename T>
__global__ void AddToColKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % M];
}
}
} // namespace
template <>
void AddToRow<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
hipLaunchKernelGGL(( AddToRowKernel<float>), dim3(CAFFE_GET_BLOCKS(M * N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), M, N, x, y);
}
template <>
void AddToCol<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
hipLaunchKernelGGL(( AddToColKernel<float>), dim3(CAFFE_GET_BLOCKS(M * N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), M, N, x, y);
}
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K, const float alpha, const float* A,
const float* B, const float beta, float* C, CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(context->cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha,
const float* A, const float* x, const float beta, float* y,
CUDAContext* context) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(context->cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>(const int N, const T alpha, T *Y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( SetKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(int);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
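// The hiprand uniform generators produce values in (0, 1]; this kernel rescales them in
// place to span the caller's [min, max] range.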
template <typename T>
__global__ void UniformShift(const int N, const T min, const T max,
T* x) {
T scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = x[i] * scale + min;
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const int n, const float min, const float max, float* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const int n, const double min, const double max, double* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, min, max, r);
}
template <>
void RandGaussian<float, CUDAContext>(
const int n, const float mean, const float std, float* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateNormal(
context->curand_generator(), r, n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const int n, const double mean, const double std, double* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateNormalDouble(
context->curand_generator(), r, n, mean, std));
}
template<>
void Dot<float, CUDAContext>(
const int n, const float* a, const float* b, float* y,
CUDAContext* context) {
float result;
CUBLAS_CHECK(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template<>
void Dot<double, CUDAContext>(
const int n, const double* a, const double* b, double* y,
CUDAContext* context) {
double result;
// write the dot product into the host-side temporary, then copy it to y below (mirrors the float overload)
CUBLAS_CHECK(hipblasDdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<double, CPUContext, CUDAContext>(1, &result, y);
}
#define CAFFE2_MATH_SUM_FUNC(T) \
template<> \
void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\
thrust::device_ptr<const T> dev_ptr(x); \
T result = thrust::reduce( \
thrust::hip::par.on(context->cuda_stream()), \
dev_ptr, dev_ptr + N, static_cast<T>(0), thrust::plus<T>()); \
context->Copy<T, CPUContext, CUDAContext>(1, &result, y); \
}
CAFFE2_MATH_SUM_FUNC(float)
CAFFE2_MATH_SUM_FUNC(double)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const float* x, const int* idx, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(
const int n, const T alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * alpha;
}
}
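// Variant of ScaleKernel that reads alpha from device memory, so the scale factor can be
// produced by an earlier kernel (for example a reduction) without copying it to the host.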
template <typename T>
__global__ void ScaleKernelDeviceAlpha(
const int n, const T* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
} // namespace
template <>
void Scale<float, CUDAContext>(
const int n, const float alpha, const float *x, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double alpha, const double *x, double* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<double>),
dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float>),
dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double* alpha, const double *x, double* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(const int N, const float alpha, const float* X,
float* Y, CUDAContext* context) {
CUBLAS_CHECK(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N, const double alpha, const double* X,
double* Y, CUDAContext* context) {
CUBLAS_CHECK(hipblasDaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const T* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] += x[index] * (*a);
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, X, Y);
}
template <>
void Axpy<double, CUDAContext>(
const int n, const double* alpha, const double* X,
double* Y, CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, a, x, b, y);
}
template <>
void Axpby<double, CUDAContext>(
const int n, const double a, const double* x, const double b, double* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, a, x, b, y);
}
namespace {
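// im2col for NCHW layout: one thread per (channel, output row, output column) entry; each
// thread copies its kernel_h x kernel_w input patch into the corresponding column of
// data_col, writing zeros where the patch overlaps the padding.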
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int width_col, const int channels,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int channel_in = index % channels;
int w_out = index / channels % width_col;
int h_out = index / channels / width_col;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* local_data_col = data_col +
((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
+ channel_in;
for (int i = 0; i < kernel_h; ++i) {
int h = h_in + i;
for (int j = 0; j < kernel_w; ++j) {
int w = w_in + j;
*local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[(h * width + w) * channels + channel_in] : 0;
local_data_col += channels;
}
}
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
const int height, const int width,
const int patch_h, const int patch_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int w = index % width + pad_l;
int h = (index / width) % height + pad_t;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int c = index % channels;
int w = index / channels % width + pad_l;
int h = index / channels / width + pad_t;
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int channels_col = patch_h * patch_w * channels;
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int c_col = ((h - h_col * stride_h) * patch_w + w - w_col * stride_w) * channels + c;
val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
}
}
*/
// Equivalent of above
int offset = (h * patch_w + w) * channels + c;
int coeff_h_col = width_col * channels_col - stride_h * patch_w * channels;
int coeff_w_col = channels_col - stride_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
} // namespace
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
pad_l, stride_h, stride_w, height_col, width_col, data_col);
}
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
// We are going to launch height_col * width_col * channels kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = height_col * width_col * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
pad_l, stride_h, stride_w, width_col, channels, data_col);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, height, width, kernel_h, kernel_w,
pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_im);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = height * width * channels;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, width, channels, kernel_h, kernel_w,
pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
namespace {
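// Byte-wise strided 2-D copy: CopyMatrix below multiplies N and the leading dimensions by
// itemsize and casts to char*, so each thread here moves exactly one byte.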
__global__ void CopyMatrixKernel(
const int M, const int N, const char* A, const int lda,
char* B, const int ldb) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
int r = i / N;
int c = i % N;
B[r * ldb + c] = A[r * lda + c];
}
}
} // namespace
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize, const int M, const int N, const void* A,
const int lda, void* B, const int ldb, CUDAContext* context) {
hipLaunchKernelGGL(( CopyMatrixKernel), dim3(CAFFE_GET_BLOCKS(M * N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
M, N * itemsize, static_cast<const char*>(A), lda * itemsize,
static_cast<char*>(B), ldb * itemsize);
}
} // namespace math
} // namespace kurff
| 86c4b5fb86d2ee917933d1f410c50a4d4491ec89.cu | // Implements the math functions for GPU.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/system/cuda/detail/par.h>
#include <thrust/version.h>
#include "math.h"
#include "context_gpu.h"
static_assert(THRUST_VERSION >= 100800,
"kurff requires a thrust version > 1.8.");
namespace kurff {
namespace math {
// TODO(Yangqing): Yuck again. Maybe change it to templated functors?
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
_Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Exp, exp)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Log, log)
__device__ float cuda_sqrf(const float x) { return x * x; }
__device__ double cuda_sqr(const double x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sqr, cuda_sqr)
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(a[i], b[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, \
CUDAContext* context) { \
_Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
N, a, b, y); \
}
#define CAFFE_MATH_CUDA_ADD(x, y) (x + y)
#define CAFFE_MATH_CUDA_SUB(x, y) (x - y)
#define CAFFE_MATH_CUDA_MUL(x, y) (x * y)
#define CAFFE_MATH_CUDA_DIV(x, y) (x / y)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, CAFFE_MATH_CUDA_DIV)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, CAFFE_MATH_CUDA_DIV)
/*
#define CAFFE2_SPECIALIZED_ROWWISEMAX(T) \
template <> \
void RowwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
for (int i = 0; i < N; ++i) { \
y[i] = x[i*D]; \
for (int j = 1; j < D; ++j) { \
y[i] = std::max(y[i], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_ROWWISEMAX(float)
#define CAFFE2_SPECIALIZED_COLWISEMAX(T) \
template <> \
void ColwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
memcpy(y, x, sizeof(T) * D); \
for (int i = 1; i < N; ++i) { \
for (int j = 0; j < D; ++j) { \
y[j] = std::max(y[j], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_COLWISEMAX(float)
*/
namespace {
template<typename T>
__global__ void AddToRowKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % N];
}
}
template<typename T>
__global__ void AddToColKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % M];
}
}
} // namespace
template <>
void AddToRow<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
AddToRowKernel<float><<<CAFFE_GET_BLOCKS(M * N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(M, N, x, y);
}
template <>
void AddToCol<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
AddToColKernel<float><<<CAFFE_GET_BLOCKS(M * N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(M, N, x, y);
}
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K, const float alpha, const float* A,
const float* B, const float beta, float* C, CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(context->cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
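// The operand swap above is the standard row-major-to-column-major trick:
// cuBLAS expects column-major storage, and a row-major M x N matrix has the
// same memory layout as a column-major N x M matrix, so instead of
// C = op(A) * op(B) we ask cuBLAS for C^T = op(B)^T * op(A)^T. That is why B
// is passed first, M and N are exchanged, and the output leading dimension is
// N. For the plain case (both CblasNoTrans) the call above is equivalent to:
//
// cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K,
// &alpha, B, N, A, K, &beta, C, N);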
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha,
const float* A, const float* x, const float beta, float* y,
CUDAContext* context) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(context->cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>(const int N, const T alpha, T *Y, \
CUDAContext* context) { \
SetKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>(N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(int);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void UniformShift(const int N, const T min, const T max,
T* x) {
T scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = x[i] * scale + min;
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const int n, const float min, const float max, float* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const int n, const double min, const double max, double* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandGaussian<float, CUDAContext>(
const int n, const float mean, const float std, float* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateNormal(
context->curand_generator(), r, n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const int n, const double mean, const double std, double* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateNormalDouble(
context->curand_generator(), r, n, mean, std));
}
template<>
void Dot<float, CUDAContext>(
const int n, const float* a, const float* b, float* y,
CUDAContext* context) {
float result;
CUBLAS_CHECK(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template<>
void Dot<double, CUDAContext>(
const int n, const double* a, const double* b, double* y,
CUDAContext* context) {
double result;
CUBLAS_CHECK(cublasDdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<double, CPUContext, CUDAContext>(1, &result, y);
}
#define CAFFE2_MATH_SUM_FUNC(T) \
template<> \
void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\
thrust::device_ptr<const T> dev_ptr(x); \
T result = thrust::reduce( \
thrust::cuda::par.on(context->cuda_stream()), \
dev_ptr, dev_ptr + N, static_cast<T>(0), thrust::plus<T>()); \
context->Copy<T, CPUContext, CUDAContext>(1, &result, y); \
}
CAFFE2_MATH_SUM_FUNC(float)
CAFFE2_MATH_SUM_FUNC(double)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const float* x, const int* idx, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
SelectKernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(
const int n, const T alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * alpha;
}
}
template <typename T>
__global__ void ScaleKernelDeviceAlpha(
const int n, const T* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
} // namespace
template <>
void Scale<float, CUDAContext>(
const int n, const float alpha, const float *x, float* y,
CUDAContext* context) {
ScaleKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double alpha, const double *x, double* y,
CUDAContext* context) {
ScaleKernel<double><<<
CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float><<<
CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double* alpha, const double *x, double* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(const int N, const float alpha, const float* X,
float* Y, CUDAContext* context) {
CUBLAS_CHECK(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N, const double alpha, const double* X,
double* Y, CUDAContext* context) {
CUBLAS_CHECK(cublasDaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const T* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] += x[index] * (*a);
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
AxpyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, X, Y);
}
template <>
void Axpy<double, CUDAContext>(
const int n, const double* alpha, const double* X,
double* Y, CUDAContext* context) {
AxpyKernel<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
AxpbyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, a, x, b, y);
}
template <>
void Axpby<double, CUDAContext>(
const int n, const double a, const double* x, const double b, double* y,
CUDAContext* context) {
AxpbyKernel<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int width_col, const int channels,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int channel_in = index % channels;
int w_out = index / channels % width_col;
int h_out = index / channels / width_col;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* local_data_col = data_col +
((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
+ channel_in;
for (int i = 0; i < kernel_h; ++i) {
int h = h_in + i;
for (int j = 0; j < kernel_w; ++j) {
int w = w_in + j;
*local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[(h * width + w) * channels + channel_in] : 0;
local_data_col += channels;
}
}
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
const int height, const int width,
const int patch_h, const int patch_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int w = index % width + pad_l;
int h = (index / width) % height + pad_t;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
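// Index arithmetic used by the kernel above: for output position
// (h_col, w_col) and kernel tap (i, j) with i = h - h_col * stride_h and
// j = w - w_col * stride_w, the column-buffer index is
// ((c * patch_h + i) * patch_w + j) * height_col * width_col
// + h_col * width_col + w_col
// = offset + h_col * coeff_h_col + w_col * coeff_w_col,
// with offset, coeff_h_col and coeff_w_col as computed above. Folding the
// constants this way leaves two multiply-adds per accumulated tap.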
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int c = index % channels;
int w = index / channels % width + pad_l;
int h = index / channels / width + pad_t;
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int channels_col = patch_h * patch_w * channels;
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int c_col = ((h - h_col * stride_h) * patch_w + w - w_col * stride_w) * channels + c;
val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
}
}
*/
// Equivalent of above
int offset = (h * patch_w + w) * channels + c;
int coeff_h_col = width_col * channels_col - stride_h * patch_w * channels;
int coeff_w_col = channels_col - stride_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
} // namespace
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
pad_l, stride_h, stride_w, height_col, width_col, data_col);
}
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
// We are going to launch height_col * width_col * channels kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = height_col * width_col * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
pad_l, stride_h, stride_w, width_col, channels, data_col);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_col, height, width, kernel_h, kernel_w,
pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_im);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = height * width * channels;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_col, width, channels, kernel_h, kernel_w,
pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
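// Illustrative pairing of these wrappers in a GEMM-based convolution (sketch
// only; x, filters, col_buf, y, out_h, out_w and ctx are hypothetical and the
// real layer code lives elsewhere): the forward pass lowers the input with
// Im2col and multiplies by the filter matrix, and the gradients reuse Gemm
// plus Col2im.
//
// Im2col<float, CUDAContext, StorageOrder::NCHW>(
// x, C, H, W, kH, kW, pad, pad, pad, pad, stride, stride, col_buf, &ctx);
// Gemm<float, CUDAContext>(CblasNoTrans, CblasNoTrans,
// num_filters, out_h * out_w, C * kH * kW,
// 1.f, filters, col_buf, 0.f, y, &ctx);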
namespace {
__global__ void CopyMatrixKernel(
const int M, const int N, const char* A, const int lda,
char* B, const int ldb) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
int r = i / N;
int c = i % N;
B[r * ldb + c] = A[r * lda + c];
}
}
} // namespace
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize, const int M, const int N, const void* A,
const int lda, void* B, const int ldb, CUDAContext* context) {
CopyMatrixKernel<<<CAFFE_GET_BLOCKS(M * N), CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
M, N * itemsize, static_cast<const char*>(A), lda * itemsize,
static_cast<char*>(B), ldb * itemsize);
}
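// CopyMatrix treats each row as a run of raw bytes: the launch above rescales
// N, lda and ldb by itemsize and the kernel copies M x (N * itemsize) chars
// with the corresponding byte strides, which keeps a single kernel for every
// element type at the cost of byte-granular loads and stores.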
} // namespace math
} // namespace caffe2
|
e0dab058fe93e0302d99ffe3edfb8945cea2554b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#if (__CUDACC_VER_MAJOR__ >= 9)
#define SHFL0(v) __shfl_sync(~0, v, 0)
#define SHFL(v, t) __shfl_sync(~0, v, t)
#define SYNCWARP() __syncwarp()
#define BALLOT(v) __ballot_sync(~0, v)
#else
#define SHFL0(v) __shfl(v, 0)
#define SHFL(v, t) __shfl(v, t)
#define SYNCWARP()
#define BALLOT(v) __ballot(v)
#endif
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
struct compressed_stream_s
{
CompressedStreamInfo info;
};
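// Each ORC compression chunk begins with a 3-byte little-endian header whose
// low bit marks an uncompressed ("original") chunk and whose remaining 23 bits
// hold the chunk length. That is the layout the kernels below decode with
// block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
// is_uncompressed = block_len & 1; block_len >>= 1;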
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseCompressedStripeData(CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr)
{
__shared__ compressed_stream_s strm_g[4];
volatile compressed_stream_s * const s = &strm_g[threadIdx.x >> 5];
int strm_id = blockIdx.x * 4 + (threadIdx.x >> 5);
int t = threadIdx.x & 0x1f;
if (strm_id < num_streams && t < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(CompressedStreamInfo) <= 128
((uint32_t *)&s->info)[t] = ((const uint32_t *)&strm_info[strm_id])[t];
}
__syncthreads();
if (strm_id < num_streams)
{
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
gpu_inflate_input_s *decctl = s->info.decctl;
uint8_t *uncompressed = s->info.uncompressed_data;
size_t max_uncompressed_size = 0;
uint32_t num_compressed_blocks = 0;
while (cur + 3 < end)
{
uint32_t block_len = SHFL0((t == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size;
block_len >>= 1;
cur += 3;
if (block_len > block_size || cur + block_len > end)
{
// Fatal
num_compressed_blocks = 0;
max_uncompressed_size = 0;
break;
}
// TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual uncompressed size and avoid waste due to block size alignment
// For now, rely on the max compression ratio to limit waste for the most extreme cases (small single-block streams)
uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size;
if (is_uncompressed)
{
// Copy the uncompressed data to output (not very efficient, but should only occur for small blocks and probably faster than decompression)
if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size)
{
for (int i = t; i < uncompressed_size; i += 32)
{
uncompressed[max_uncompressed_size + i] = cur[i];
}
}
}
else if (decctl && !t && num_compressed_blocks < s->info.max_compressed_blocks)
{
decctl[num_compressed_blocks].srcDevice = const_cast<uint8_t *>(cur);
decctl[num_compressed_blocks].srcSize = block_len;
decctl[num_compressed_blocks].dstDevice = uncompressed + max_uncompressed_size;
decctl[num_compressed_blocks].dstSize = uncompressed_size;
}
cur += block_len;
max_uncompressed_size += uncompressed_size;
num_compressed_blocks += 1 - is_uncompressed;
}
if (!t)
{
s->info.num_compressed_blocks = num_compressed_blocks;
s->info.max_uncompressed_size = max_uncompressed_size;
}
}
__syncthreads();
if (strm_id < num_streams && t < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(CompressedStreamInfo) <= 128
((uint32_t *)&strm_info[strm_id])[t] = ((uint32_t *)&s->info)[t];
}
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuPostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams)
{
__shared__ compressed_stream_s strm_g[4];
volatile compressed_stream_s * const s = &strm_g[threadIdx.x >> 5];
int strm_id = blockIdx.x * 4 + (threadIdx.x >> 5);
int t = threadIdx.x & 0x1f;
if (strm_id < num_streams && t < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(CompressedStreamInfo) <= 128
((uint32_t *)&s->info)[t] = ((const uint32_t *)&strm_info[strm_id])[t];
}
__syncthreads();
if (strm_id < num_streams && s->info.max_compressed_blocks > 0 && s->info.max_uncompressed_size > 0)
{
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
const gpu_inflate_input_s *dec_in = s->info.decctl;
const gpu_inflate_status_s *dec_out = s->info.decstatus;
uint8_t *uncompressed_actual = s->info.uncompressed_data;
uint8_t *uncompressed_estimated = uncompressed_actual;
uint32_t num_compressed_blocks = 0;
uint32_t max_compressed_blocks = min(s->info.num_compressed_blocks, s->info.max_compressed_blocks);
while (cur + 3 < end)
{
uint32_t block_len = SHFL0((t == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size_est, uncompressed_size_actual;
block_len >>= 1;
cur += 3;
if (cur + block_len > end)
{
break;
}
if (is_uncompressed)
{
uncompressed_size_est = block_len;
uncompressed_size_actual = block_len;
}
else
{
if (num_compressed_blocks > max_compressed_blocks)
{
break;
}
if (SHFL0((t == 0) ? dec_out[num_compressed_blocks].status : 0) != 0)
{
// Decompression failed, not much point in doing anything else
break;
}
uncompressed_size_est = SHFL0((t == 0) ? *(const uint32_t *)&dec_in[num_compressed_blocks].dstSize : 0);
uncompressed_size_actual = SHFL0((t == 0) ? *(const uint32_t *)&dec_out[num_compressed_blocks].bytes_written : 0);
}
// In practice, this should never happen with a well-behaved writer, as we would expect the uncompressed size to always be equal to
// the compression block size except for the last block
if (uncompressed_actual < uncompressed_estimated)
{
// warp-level memmove
for (int i = t; i < (int)uncompressed_size_actual; i += 32)
{
uncompressed_actual[i] = uncompressed_estimated[i];
}
}
cur += block_len;
num_compressed_blocks += 1 - is_uncompressed;
uncompressed_estimated += uncompressed_size_est;
uncompressed_actual += uncompressed_size_actual;
}
// Update info with actual uncompressed size
if (!t)
{
size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data;
// Set uncompressed size to zero if there were any errors
strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0;
}
}
}
/**
* @brief Shared mem state for gpuParseRowGroupIndex
*
*/
struct rowindex_state_s
{
ColumnDesc chunk;
uint32_t rowgroup_start;
uint32_t rowgroup_end;
int is_compressed;
uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2
CompressedStreamInfo strm_info[2];
RowGroup rowgroups[128];
uint32_t compressed_offset[128][2];
};
#define PB_ROWINDEXENTRY_ID ((1*8) + PB_TYPE_FIXEDLEN)
enum row_entry_state_e {
NOT_FOUND = 0,
GET_LENGTH,
SKIP_VARINT,
SKIP_FIXEDLEN,
STORE_INDEX0,
STORE_INDEX1,
STORE_INDEX2,
};
/**
* @brief Decode a single row group index entry
*
* @param[in,out] s row group index state
* @param[in] start start position in byte stream
* @param[in] end end of byte stream
* @return bytes consumed
*
**/
static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s *s, const uint8_t *start, const uint8_t *end)
{
const uint8_t *cur = start;
row_entry_state_e state = NOT_FOUND;
uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0;
while (cur < end)
{
uint32_t v = 0;
for (uint32_t l = 0; l <= 28; l += 7)
{
uint32_t c = (cur < end) ? *cur++ : 0;
v |= (c & 0x7f) << l;
if (c <= 0x7f)
break;
}
switch (state)
{
case NOT_FOUND:
if (v == PB_ROWINDEXENTRY_ID)
{
state = GET_LENGTH;
}
else
{
v &= 7;
if (v == PB_TYPE_FIXED64)
cur += 8;
else if (v == PB_TYPE_FIXED32)
cur += 4;
else if (v == PB_TYPE_VARINT)
state = SKIP_VARINT;
else if (v == PB_TYPE_FIXEDLEN)
state = SKIP_FIXEDLEN;
}
break;
case SKIP_VARINT:
state = NOT_FOUND;
break;
case SKIP_FIXEDLEN:
cur += v;
state = NOT_FOUND;
break;
case GET_LENGTH:
if (length == 0)
{
length = (uint32_t)(cur + v - start);
state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry entry)
}
else
{
pos_end = min((uint32_t)(cur + v - start), length);
state = STORE_INDEX0;
}
break;
case STORE_INDEX0:
ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2 : CI_PRESENT;
idx_id++;
if (s->is_compressed)
{
if (ci_id < CI_PRESENT)
s->row_index_entry[0][ci_id] = v;
if (cur >= start + pos_end)
return length;
state = STORE_INDEX1;
break;
}
else
{
if (ci_id < CI_PRESENT)
s->row_index_entry[0][ci_id] = 0;
// Fall through to STORE_INDEX1 for uncompressed (always block0)
}
case STORE_INDEX1:
if (ci_id < CI_PRESENT)
s->row_index_entry[1][ci_id] = v;
if (cur >= start + pos_end)
return length;
state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2
&& (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR
|| s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE))
? STORE_INDEX0 : STORE_INDEX2;
break;
case STORE_INDEX2:
if (ci_id < CI_PRESENT)
s->row_index_entry[2][ci_id] = v;
// Boolean columns have an extra byte to indicate the position of the bit within the byte
// TODO: Currently assuming rowIndexStride is a multiple of 8 and ignoring this value
if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN)
cur++;
if (cur >= start + pos_end)
return length;
state = STORE_INDEX0;
break;
}
}
return (uint32_t)(end - start);
}
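// Background for the state machine above: every protobuf field starts with a
// varint key equal to (field_number << 3) | wire_type, and varints store seven
// payload bits per byte with the high bit of each byte as a continuation flag.
// The inner loop therefore accumulates up to five 7-bit groups into v, and the
// switch either skips the field according to its wire type or records the
// RowIndexEntry stream positions it is looking for.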
/**
* @brief Decode row group index entries
*
* @param[in,out] s row group index state
* @param[in] num_rowgroups Number of index entries to read
*
**/
static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s *s, int num_rowgroups)
{
const uint8_t *index_data = s->chunk.streams[CI_INDEX];
int index_data_len = s->chunk.strm_len[CI_INDEX];
for (int i = 0; i < num_rowgroups; i++)
{
s->row_index_entry[0][0] = 0;
s->row_index_entry[0][1] = 0;
s->row_index_entry[1][0] = 0;
s->row_index_entry[1][1] = 0;
s->row_index_entry[2][0] = 0;
s->row_index_entry[2][1] = 0;
if (index_data_len > 0)
{
int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len);
index_data += len;
index_data_len = max(index_data_len - len, 0);
for (int j = 0; j < 2; j++)
{
s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j];
s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j];
s->compressed_offset[i][j] = s->row_index_entry[0][j];
}
}
}
s->chunk.streams[CI_INDEX] = index_data;
s->chunk.strm_len[CI_INDEX] = index_data_len;
}
/**
* @brief Translate block+offset compressed position into an uncompressed offset
*
* @param[in,out] s row group index state
* @param[in] ci_id index to convert (CI_DATA or CI_DATA2)
* @param[in] num_rowgroups Number of index entries
* @param[in] t thread id
*
**/
static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s *s, int ci_id, int num_rowgroups, int t)
{
int32_t strm_len = s->chunk.strm_len[ci_id];
if (strm_len > 0)
{
int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0;
if (compressed_offset > 0)
{
const uint8_t *start = s->strm_info[ci_id].compressed_data;
const uint8_t *cur = start;
const uint8_t *end = cur + s->strm_info[ci_id].compressed_data_size;
gpu_inflate_status_s *decstatus = s->strm_info[ci_id].decstatus;
uint32_t uncomp_offset = 0;
for (;;)
{
uint32_t block_len, is_uncompressed;
if (cur + 3 > end || cur + 3 >= start + compressed_offset)
{
break;
}
block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
cur += 3;
is_uncompressed = block_len & 1;
block_len >>= 1;
cur += block_len;
if (cur > end)
{
break;
}
if (is_uncompressed)
{
uncomp_offset += block_len;
}
else
{
uncomp_offset += decstatus->bytes_written;
decstatus++;
}
}
s->rowgroups[t].strm_offset[ci_id] += uncomp_offset;
}
}
}
/**
* @brief Decode index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
*
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseRowGroupIndex(RowGroup *row_groups, CompressedStreamInfo *strm_info, ColumnDesc *chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride)
{
__shared__ __align__(16) rowindex_state_s state_g;
rowindex_state_s * const s = &state_g;
uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x;
int t = threadIdx.x;
if (t < sizeof(ColumnDesc) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(ColumnDesc) <= 128x4
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t];
}
__syncthreads();
if (strm_info)
{
int strm_len = s->chunk.strm_len[t >> 6];
int strm_id = s->chunk.strm_id[t >> 6];
int t6 = t & 0x3f;
if (strm_len > 0 && t6 < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->strm_info[t >> 6])[t6] = ((const uint32_t *)&strm_info[strm_id])[t6];
}
}
if (t == 0)
{
uint32_t rowgroups_in_chunk = (rowidx_stride > 0) ? (s->chunk.num_rows + rowidx_stride - 1) / rowidx_stride : 1;
s->rowgroup_start = s->chunk.rowgroup_id;
s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk;
s->is_compressed = (strm_info != NULL);
}
__syncthreads();
while (s->rowgroup_start < s->rowgroup_end)
{
int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128);
int rowgroup_size4, t4, t32;
s->rowgroups[t].chunk_id = chunk_id;
if (t == 0)
{
gpuReadRowGroupIndexEntries(s, num_rowgroups);
}
__syncthreads();
if (s->is_compressed)
{
// Convert the block + blk_offset pair into a raw offset into the decompressed stream
if (s->chunk.strm_len[CI_DATA] > 0)
{
gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t);
}
if (s->chunk.strm_len[CI_DATA2] > 0)
{
gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t);
}
__syncthreads();
}
rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t);
t4 = t & 3;
t32 = t >> 2;
for (int i = t32; i < num_rowgroups; i += 32)
{
for (int j = t4; j < rowgroup_size4; j += 4)
{
((uint32_t *)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t *)&s->rowgroups[i])[j];
}
}
__syncthreads();
if (t == 0)
{
s->rowgroup_start += num_rowgroups;
}
__syncthreads();
}
}
hipError_t __host__ ParseCompressedStripeData(CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, hipStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
hipLaunchKernelGGL(( gpuParseCompressedStripeData) , dim3(dim_grid), dim3(dim_block), 0, stream , strm_info, num_streams, compression_block_size, log2maxcr);
return hipSuccess;
}
hipError_t __host__ PostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams, hipStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
hipLaunchKernelGGL(( gpuPostDecompressionReassemble) , dim3(dim_grid), dim3(dim_block), 0, stream , strm_info, num_streams);
return hipSuccess;
}
/**
* @brief Launches kernel for constructing rowgroup from index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] stream CUDA stream to use, default 0
*
* @return hipSuccess if successful, a CUDA error code otherwise
**/
hipError_t __host__ ParseRowGroupIndex(RowGroup *row_groups, CompressedStreamInfo *strm_info, ColumnDesc *chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, hipStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block
hipLaunchKernelGGL(( gpuParseRowGroupIndex) , dim3(dim_grid), dim3(dim_block), 0, stream , row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride);
return hipSuccess;
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
| e0dab058fe93e0302d99ffe3edfb8945cea2554b.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#if (__CUDACC_VER_MAJOR__ >= 9)
#define SHFL0(v) __shfl_sync(~0, v, 0)
#define SHFL(v, t) __shfl_sync(~0, v, t)
#define SYNCWARP() __syncwarp()
#define BALLOT(v) __ballot_sync(~0, v)
#else
#define SHFL0(v) __shfl(v, 0)
#define SHFL(v, t) __shfl(v, t)
#define SYNCWARP()
#define BALLOT(v) __ballot(v)
#endif
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
struct compressed_stream_s
{
CompressedStreamInfo info;
};
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseCompressedStripeData(CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr)
{
__shared__ compressed_stream_s strm_g[4];
volatile compressed_stream_s * const s = &strm_g[threadIdx.x >> 5];
int strm_id = blockIdx.x * 4 + (threadIdx.x >> 5);
int t = threadIdx.x & 0x1f;
if (strm_id < num_streams && t < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(CompressedStreamInfo) <= 128
((uint32_t *)&s->info)[t] = ((const uint32_t *)&strm_info[strm_id])[t];
}
__syncthreads();
if (strm_id < num_streams)
{
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
gpu_inflate_input_s *decctl = s->info.decctl;
uint8_t *uncompressed = s->info.uncompressed_data;
size_t max_uncompressed_size = 0;
uint32_t num_compressed_blocks = 0;
while (cur + 3 < end)
{
uint32_t block_len = SHFL0((t == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size;
block_len >>= 1;
cur += 3;
if (block_len > block_size || cur + block_len > end)
{
// Fatal
num_compressed_blocks = 0;
max_uncompressed_size = 0;
break;
}
// TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual uncompressed size and avoid waste due to block size alignment
// For now, rely on the max compression ratio to limit waste for the most extreme cases (small single-block streams)
uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size;
if (is_uncompressed)
{
// Copy the uncompressed data to output (not very efficient, but should only occur for small blocks and probably faster than decompression)
if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size)
{
for (int i = t; i < uncompressed_size; i += 32)
{
uncompressed[max_uncompressed_size + i] = cur[i];
}
}
}
else if (decctl && !t && num_compressed_blocks < s->info.max_compressed_blocks)
{
decctl[num_compressed_blocks].srcDevice = const_cast<uint8_t *>(cur);
decctl[num_compressed_blocks].srcSize = block_len;
decctl[num_compressed_blocks].dstDevice = uncompressed + max_uncompressed_size;
decctl[num_compressed_blocks].dstSize = uncompressed_size;
}
cur += block_len;
max_uncompressed_size += uncompressed_size;
num_compressed_blocks += 1 - is_uncompressed;
}
if (!t)
{
s->info.num_compressed_blocks = num_compressed_blocks;
s->info.max_uncompressed_size = max_uncompressed_size;
}
}
__syncthreads();
if (strm_id < num_streams && t < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(CompressedStreamInfo) <= 128
((uint32_t *)&strm_info[strm_id])[t] = ((uint32_t *)&s->info)[t];
}
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuPostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams)
{
__shared__ compressed_stream_s strm_g[4];
volatile compressed_stream_s * const s = &strm_g[threadIdx.x >> 5];
int strm_id = blockIdx.x * 4 + (threadIdx.x >> 5);
int t = threadIdx.x & 0x1f;
if (strm_id < num_streams && t < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(CompressedStreamInfo) <= 128
((uint32_t *)&s->info)[t] = ((const uint32_t *)&strm_info[strm_id])[t];
}
__syncthreads();
if (strm_id < num_streams && s->info.max_compressed_blocks > 0 && s->info.max_uncompressed_size > 0)
{
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
const gpu_inflate_input_s *dec_in = s->info.decctl;
const gpu_inflate_status_s *dec_out = s->info.decstatus;
uint8_t *uncompressed_actual = s->info.uncompressed_data;
uint8_t *uncompressed_estimated = uncompressed_actual;
uint32_t num_compressed_blocks = 0;
uint32_t max_compressed_blocks = min(s->info.num_compressed_blocks, s->info.max_compressed_blocks);
while (cur + 3 < end)
{
uint32_t block_len = SHFL0((t == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size_est, uncompressed_size_actual;
block_len >>= 1;
cur += 3;
if (cur + block_len > end)
{
break;
}
if (is_uncompressed)
{
uncompressed_size_est = block_len;
uncompressed_size_actual = block_len;
}
else
{
if (num_compressed_blocks > max_compressed_blocks)
{
break;
}
if (SHFL0((t == 0) ? dec_out[num_compressed_blocks].status : 0) != 0)
{
// Decompression failed, not much point in doing anything else
break;
}
uncompressed_size_est = SHFL0((t == 0) ? *(const uint32_t *)&dec_in[num_compressed_blocks].dstSize : 0);
uncompressed_size_actual = SHFL0((t == 0) ? *(const uint32_t *)&dec_out[num_compressed_blocks].bytes_written : 0);
}
// In practice, this should never happen with a well-behaved writer, as we would expect the uncompressed size to always be equal to
// the compression block size except for the last block
if (uncompressed_actual < uncompressed_estimated)
{
// warp-level memmove
for (int i = t; i < (int)uncompressed_size_actual; i += 32)
{
uncompressed_actual[i] = uncompressed_estimated[i];
}
}
cur += block_len;
num_compressed_blocks += 1 - is_uncompressed;
uncompressed_estimated += uncompressed_size_est;
uncompressed_actual += uncompressed_size_actual;
}
// Update info with actual uncompressed size
if (!t)
{
size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data;
// Set uncompressed size to zero if there were any errors
strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0;
}
}
}
/**
* @brief Shared mem state for gpuParseRowGroupIndex
*
*/
struct rowindex_state_s
{
ColumnDesc chunk;
uint32_t rowgroup_start;
uint32_t rowgroup_end;
int is_compressed;
uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2
CompressedStreamInfo strm_info[2];
RowGroup rowgroups[128];
uint32_t compressed_offset[128][2];
};
#define PB_ROWINDEXENTRY_ID ((1*8) + PB_TYPE_FIXEDLEN)
enum row_entry_state_e {
NOT_FOUND = 0,
GET_LENGTH,
SKIP_VARINT,
SKIP_FIXEDLEN,
STORE_INDEX0,
STORE_INDEX1,
STORE_INDEX2,
};
/**
* @brief Decode a single row group index entry
*
* @param[in,out] s row group index state
* @param[in] start start position in byte stream
* @param[in] end end of byte stream
* @return bytes consumed
*
**/
static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s *s, const uint8_t *start, const uint8_t *end)
{
const uint8_t *cur = start;
row_entry_state_e state = NOT_FOUND;
uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0;
while (cur < end)
{
uint32_t v = 0;
for (uint32_t l = 0; l <= 28; l += 7)
{
uint32_t c = (cur < end) ? *cur++ : 0;
v |= (c & 0x7f) << l;
if (c <= 0x7f)
break;
}
switch (state)
{
case NOT_FOUND:
if (v == PB_ROWINDEXENTRY_ID)
{
state = GET_LENGTH;
}
else
{
v &= 7;
if (v == PB_TYPE_FIXED64)
cur += 8;
else if (v == PB_TYPE_FIXED32)
cur += 4;
else if (v == PB_TYPE_VARINT)
state = SKIP_VARINT;
else if (v == PB_TYPE_FIXEDLEN)
state = SKIP_FIXEDLEN;
}
break;
case SKIP_VARINT:
state = NOT_FOUND;
break;
case SKIP_FIXEDLEN:
cur += v;
state = NOT_FOUND;
break;
case GET_LENGTH:
if (length == 0)
{
length = (uint32_t)(cur + v - start);
state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry entry)
}
else
{
pos_end = min((uint32_t)(cur + v - start), length);
state = STORE_INDEX0;
}
break;
case STORE_INDEX0:
ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2 : CI_PRESENT;
idx_id++;
if (s->is_compressed)
{
if (ci_id < CI_PRESENT)
s->row_index_entry[0][ci_id] = v;
if (cur >= start + pos_end)
return length;
state = STORE_INDEX1;
break;
}
else
{
if (ci_id < CI_PRESENT)
s->row_index_entry[0][ci_id] = 0;
// Fall through to STORE_INDEX1 for uncompressed (always block0)
}
case STORE_INDEX1:
if (ci_id < CI_PRESENT)
s->row_index_entry[1][ci_id] = v;
if (cur >= start + pos_end)
return length;
state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2
&& (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR
|| s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE))
? STORE_INDEX0 : STORE_INDEX2;
break;
case STORE_INDEX2:
if (ci_id < CI_PRESENT)
s->row_index_entry[2][ci_id] = v;
// Boolean columns have an extra byte to indicate the position of the bit within the byte
// TODO: Currently assuming rowIndexStride is a multiple of 8 and ignoring this value
if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN)
cur++;
if (cur >= start + pos_end)
return length;
state = STORE_INDEX0;
break;
}
}
return (uint32_t)(end - start);
}
/**
* @brief Decode row group index entries
*
* @param[in,out] s row group index state
* @param[in] num_rowgroups Number of index entries to read
*
**/
static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s *s, int num_rowgroups)
{
const uint8_t *index_data = s->chunk.streams[CI_INDEX];
int index_data_len = s->chunk.strm_len[CI_INDEX];
for (int i = 0; i < num_rowgroups; i++)
{
s->row_index_entry[0][0] = 0;
s->row_index_entry[0][1] = 0;
s->row_index_entry[1][0] = 0;
s->row_index_entry[1][1] = 0;
s->row_index_entry[2][0] = 0;
s->row_index_entry[2][1] = 0;
if (index_data_len > 0)
{
int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len);
index_data += len;
index_data_len = max(index_data_len - len, 0);
for (int j = 0; j < 2; j++)
{
s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j];
s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j];
s->compressed_offset[i][j] = s->row_index_entry[0][j];
}
}
}
s->chunk.streams[CI_INDEX] = index_data;
s->chunk.strm_len[CI_INDEX] = index_data_len;
}
/**
* @brief Translate block+offset compressed position into an uncompressed offset
*
* @param[in,out] s row group index state
* @param[in] ci_id index to convert (CI_DATA or CI_DATA2)
* @param[in] num_rowgroups Number of index entries
* @param[in] t thread id
*
**/
static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s *s, int ci_id, int num_rowgroups, int t)
{
int32_t strm_len = s->chunk.strm_len[ci_id];
if (strm_len > 0)
{
int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0;
if (compressed_offset > 0)
{
const uint8_t *start = s->strm_info[ci_id].compressed_data;
const uint8_t *cur = start;
const uint8_t *end = cur + s->strm_info[ci_id].compressed_data_size;
gpu_inflate_status_s *decstatus = s->strm_info[ci_id].decstatus;
uint32_t uncomp_offset = 0;
for (;;)
{
uint32_t block_len, is_uncompressed;
if (cur + 3 > end || cur + 3 >= start + compressed_offset)
{
break;
}
block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
cur += 3;
is_uncompressed = block_len & 1;
block_len >>= 1;
cur += block_len;
if (cur > end)
{
break;
}
if (is_uncompressed)
{
uncomp_offset += block_len;
}
else
{
uncomp_offset += decstatus->bytes_written;
decstatus++;
}
}
s->rowgroups[t].strm_offset[ci_id] += uncomp_offset;
}
}
}
/**
* @brief Decode index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
*
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseRowGroupIndex(RowGroup *row_groups, CompressedStreamInfo *strm_info, ColumnDesc *chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride)
{
__shared__ __align__(16) rowindex_state_s state_g;
rowindex_state_s * const s = &state_g;
uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x;
int t = threadIdx.x;
if (t < sizeof(ColumnDesc) / sizeof(uint32_t))
{
// NOTE: Assumes that sizeof(ColumnDesc) <= 128x4
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t];
}
__syncthreads();
if (strm_info)
{
int strm_len = s->chunk.strm_len[t >> 6];
int strm_id = s->chunk.strm_id[t >> 6];
int t6 = t & 0x3f;
if (strm_len > 0 && t6 < sizeof(CompressedStreamInfo) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->strm_info[t >> 6])[t6] = ((const uint32_t *)&strm_info[strm_id])[t6];
}
}
if (t == 0)
{
uint32_t rowgroups_in_chunk = (rowidx_stride > 0) ? (s->chunk.num_rows + rowidx_stride - 1) / rowidx_stride : 1;
s->rowgroup_start = s->chunk.rowgroup_id;
s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk;
s->is_compressed = (strm_info != NULL);
}
__syncthreads();
while (s->rowgroup_start < s->rowgroup_end)
{
int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128);
int rowgroup_size4, t4, t32;
s->rowgroups[t].chunk_id = chunk_id;
if (t == 0)
{
gpuReadRowGroupIndexEntries(s, num_rowgroups);
}
__syncthreads();
if (s->is_compressed)
{
// Convert the block + blk_offset pair into a raw offset into the decompressed stream
if (s->chunk.strm_len[CI_DATA] > 0)
{
gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t);
}
if (s->chunk.strm_len[CI_DATA2] > 0)
{
gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t);
}
__syncthreads();
}
rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t);
t4 = t & 3;
t32 = t >> 2;
for (int i = t32; i < num_rowgroups; i += 32)
{
for (int j = t4; j < rowgroup_size4; j += 4)
{
((uint32_t *)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t *)&s->rowgroups[i])[j];
}
}
__syncthreads();
if (t == 0)
{
s->rowgroup_start += num_rowgroups;
}
__syncthreads();
}
}
cudaError_t __host__ ParseCompressedStripeData(CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, cudaStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
gpuParseCompressedStripeData <<< dim_grid, dim_block, 0, stream >>>(strm_info, num_streams, compression_block_size, log2maxcr);
return cudaSuccess;
}
cudaError_t __host__ PostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams, cudaStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
gpuPostDecompressionReassemble <<< dim_grid, dim_block, 0, stream >>>(strm_info, num_streams);
return cudaSuccess;
}
/**
* @brief Launches kernel for constructing rowgroup from index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t __host__ ParseRowGroupIndex(RowGroup *row_groups, CompressedStreamInfo *strm_info, ColumnDesc *chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, cudaStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block
gpuParseRowGroupIndex <<< dim_grid, dim_block, 0, stream >>>(row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride);
return cudaSuccess;
}
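/**
 * Typical host-side call sequence for the three entry points above
 * (illustrative sketch only; error handling and the batched inflate step that
 * consumes strm_info[i].decctl are omitted and live outside this file):
 *
 *   ParseCompressedStripeData(strm_info, num_streams, block_size, log2maxcr, stream);
 *   // run the device decompressor on the gpu_inflate_input_s batch
 *   PostDecompressionReassemble(strm_info, num_streams, stream);
 *   ParseRowGroupIndex(row_groups, strm_info, chunks, num_columns,
 *                      num_stripes, num_rowgroups, rowidx_stride, stream);
 **/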
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
48b5a465f85ab4b6cc15069e5447ec0a2a68fde4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "dark_cuda.h"
#include "box.h"
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream() , x, n, binary);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += fabs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream() , input, n, size, binary);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for (i = 0; i < size; ++i) {
mean += fabs(weights[f*size + i]);
}
mean = mean / size;
for (i = 0; i < size; ++i) {
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(weights, n, size, binary);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void set_zero_kernel(float *src, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) src[i] = 0;
}
__inline__ __device__
float warpAllReduceSum(float val) {
for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
#if CUDART_VERSION >= 9000
val += __shfl_xor_sync(0xffffffff, val, mask);
#else
val += __shfl_xor(val, mask);
#endif
return val;
}
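// warpAllReduceSum is a butterfly all-reduce: each __shfl_xor step exchanges
// val with the lane whose index differs in exactly one bit (masks 16, 8, 4, 2,
// 1 for WARP_SIZE == 32), so after the loop every lane of the warp holds the
// sum of all 32 lanes without touching shared memory.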
// only if (size % 32 == 0)
__global__ void reduce_kernel(float *weights, int n, int size, float *mean_arr_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int f = i / size;
if (f >= n) return;
float warp_mean = warpAllReduceSum(fabs(weights[i]));
if(i % 32 == 0)
atomicAdd(&mean_arr_gpu[f], warp_mean / size);
}
__global__ void binarize_weights_mean_kernel(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int f = i / size;
if (f >= n) return;
float mean = mean_arr_gpu[f];
binary[i] = (weights[i] > 0) ? mean : -mean;
}
void fast_binarize_weights_gpu(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
if (size % 32 == 0) {
size_t gridsize = n * size;
const int num_blocks = get_number_of_blocks(gridsize, BLOCK);// gridsize / BLOCK + 1;
set_zero_kernel << <(n/BLOCK + 1), BLOCK, 0, get_cuda_stream() >> > (mean_arr_gpu, n);
reduce_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, mean_arr_gpu);
binarize_weights_mean_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, binary, mean_arr_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
else {
binarize_weights_gpu(weights, n, size, binary);
}
}
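// Both paths above implement the XNOR-Net style weight binarization
//   alpha_f = (1/size) * sum_i |w[f][i]|,  binary[f][i] = (w[f][i] > 0) ? alpha_f : -alpha_f.
// When size is a multiple of 32 the per-filter scale is accumulated with warp
// shuffles plus atomicAdd (reduce_kernel); otherwise the straightforward
// per-filter kernel binarize_weights_gpu is used.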
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) {
hipLaunchKernelGGL(( cuda_f32_to_f16) , dim3(get_number_of_blocks(size, BLOCK)), dim3(BLOCK), 0, get_cuda_stream() , input_f32, size, (half *)output_f16);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) {
hipLaunchKernelGGL(( cuda_f16_to_f32) , dim3(get_number_of_blocks(size, BLOCK)), dim3(BLOCK), 0, get_cuda_stream() , (half *)input_f16, size, output_f32);
CHECK_CUDA(hipPeekAtLastError());
}
half *cuda_make_f16_from_f32_array(float *src, size_t n)
{
half *dst16;
size_t size = sizeof(half)*n;
CHECK_CUDA(hipMalloc((void **)&dst16, size));
if (src) {
assert(n > 0);
cuda_convert_f32_to_f16(src, n, (float *)dst16);
}
if (!dst16) error("Cuda malloc failed", DARKNET_LOC);
return dst16;
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
if (l.train == 0) state.train = 0;
if (l.stream >= 0) {
switch_stream(l.stream);
}
if (l.wait_stream_id >= 0) {
wait_stream(l.wait_stream_id);
}
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
if (!l.align_bit_weights_gpu || state.train) {
//binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu);
fast_binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu, l.mean_arr_gpu);
}
if (l.align_bit_weights_gpu && !state.train && l.c >= 32 && l.stride_x == l.stride_y)
{
//return;
//hipError_t status = hipSuccess;
//int input_size = l.c*l.h*l.w*l.batch;
int m = l.n / l.groups;
int k = l.size*l.size*l.c / l.groups;
int n = l.out_w*l.out_h;
//float * a = l.weights_gpu;
// int i, j;
// for(i = 0; i < l.batch; ++i){
// for (j = 0; j < l.groups; ++j) {
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
//size_t t_intput_size = new_ldb * n;
//size_t t_bit_input_size = t_intput_size / 8;// +1;
if (l.c % 32 == 0)
{
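// Packed XNOR path: 32 channels are repacked into one uint32_t per pixel, im2col runs on the packed tensor,
// the columns are transposed as uint32, and a transposed binary GEMM (with fused bias and optional LEAKY) writes the output.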
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h));
//float *intput_cpu = (float *)calloc(l.inputs, sizeof(float));
// state.input
//hipMemcpy(intput_cpu, state.input, l.inputs * sizeof(float), hipMemcpyDefault);
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
//size_t t_intput_size = new_ldb * l.bit_align;// n;
//size_t t_bit_input_size = t_intput_size / 8;// +1;
const int new_c = l.c / 32;
//float *re_packed_input = (float *)calloc(l.c * l.w * l.h, sizeof(float));
//uint32_t *bin_re_packed_input = (uint32_t *)calloc(new_c * l.w * l.h + 1, sizeof(uint32_t));
// float32x4 by channel (as in cuDNN)
//repack_input(intput_cpu, re_packed_input, l.w, l.h, l.c);
// 32 x floats -> 1 x uint32_t
//float_to_bit(re_packed_input, (uint8_t *)bin_re_packed_input, l.c * l.w * l.h);
//hipDeviceSynchronize();
//start_timer();
repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c);
//repack_input_gpu(state.input, state.workspace, l.w, l.h, l.c);
// 32 x floats -> 1 x uint32_t
//float_to_bit_gpu(state.workspace, (unsigned char *)l.align_workspace_gpu, l.c * l.w * l.h);// l.align_workspace_size);
//hipDeviceSynchronize();
//stop_timer_and_show_name("repack_input_gpu + float_to_bit_gpu");
//free(re_packed_input);
// slow - convolution the packed inputs and weights: float x 32 by channel (as in cuDNN)
//convolution_repacked((uint32_t *)bin_re_packed_input, (uint32_t *)l.align_bit_weights, l.output,
// l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr);
// // then exit from if()
//float *b = state.workspace;
//float *b = (float *)calloc(100 * 1024 * 1024, sizeof(float));
//float *c = l.output;
//memset(c, 0, l.outputs * sizeof(float));
//im2col_cpu_custom((float *)bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, b);
//hipMemcpy(l.align_workspace_gpu, bin_re_packed_input, (new_c * l.w * l.h + 1) * sizeof(uint32_t), hipMemcpyDefault);
//start_timer();
im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
//hipDeviceSynchronize();
//stop_timer_and_show_name("im2col_ongpu");
//free(bin_re_packed_input);
int new_k = l.size*l.size*l.c / 32;
// good for (l.c == 64)
//gemm_nn_bin_32bit_packed(m, n, new_k, 1,
// l.align_bit_weights, l.new_lda/32,
// b, n,
// c, n, l.mean_arr);
// // then exit from if()
//size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
//size_t t_intput_size = new_ldb * l.bit_align;// n;
//size_t t_bit_input_size = t_intput_size / 8;// +1;
//char *t_bit_input = (char *)calloc(t_bit_input_size, sizeof(char));
//transpose_uint32((uint32_t *)b, (uint32_t *)t_bit_input, new_k, n, n, new_ldb);
//hipMemcpy(l.transposed_align_workspace_gpu, t_bit_input, t_bit_input_size * sizeof(char), hipMemcpyDefault);
//hipMemcpy(state.workspace, b, t_bit_input_size * sizeof(char), hipMemcpyDefault);
//printf("\n n = %d, n % 32 = %d, new_ldb = %d, new_ldb % 32 = %d \n", n, n % 32, new_ldb, new_ldb % 32);
//start_timer();
transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb);
//hipDeviceSynchronize();
//stop_timer_and_show_name("transpose_uint32_gpu");
//hipDeviceSynchronize();
//stop_timer_and_show_name("repack_input_gpu_bin + im2col_ongpu + transpose_uint32_gpu_2");
//start_timer();
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
//hipDeviceSynchronize();
//stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu");
// the main GEMM function
//gemm_nn_custom_bin_mean_transposed(m, n, k, 1, (uint8_t *)l.align_bit_weights, new_ldb, (uint8_t *)t_bit_input, new_ldb, c, n, l.mean_arr);
//add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
//hipMemcpy(l.output_gpu, l.output, l.outputs * sizeof(float), hipMemcpyDefault);
// // alternative GEMM
//gemm_nn_bin_transposed_32bit_packed(m, n, new_k, 1,
// l.align_bit_weights, l.new_lda/32,
// t_bit_input, new_ldb / 32,
// c, n, l.mean_arr);
//free(t_bit_input);
//free(b);
}
else
{
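// Generic XNOR path: float im2col into the aligned workspace, pack the result to bits, transpose the bit matrix,
// then run the same transposed binary GEMM as in the packed branch.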
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//hipDeviceSynchronize();
int i = 0;
/*
// if (l.stride == 1 && l.c >= 256 && l.size > 1)
if (l.stride == 1 && l.c >= 1024 && l.size > 1 && 0)// && l.w >= 13) // disabled
{
// stride=1 only
//start_timer();
im2col_align_bin_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align);
//hipDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_bin_ongpu");
}
else*/
{
//start_timer();
im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);
//hipDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_ongpu");
//getchar();
// should be optimized
//start_timer();
float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size);
//hipDeviceSynchronize();
//stop_timer_and_show_name("float_to_bit_gpu");
}
//start_timer();
transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8);
//hipDeviceSynchronize();
//stop_timer_and_show_name("transpose_bin_gpu");
//hipDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_ongpu + float_to_bit_gpu + transpose_bin_gpu");
// should be optimized
//if(0) {//if (k > 1000) { // sequentially input-shared - BAD
// gemm_nn_custom_bin_mean_transposed_sequentially_gpu(m, n, k,
// (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu);
//}
//else { // coalescing & weights-shared-memory - GOOD
//start_timer();
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
//hipDeviceSynchronize();
//stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu");
//}
//hipDeviceSynchronize();
//check_error(status);
//getchar();
}
/*
{
float_to_bit_gpu(state.input, (unsigned char *)l.align_workspace_gpu, input_size);
convolve_bin_gpu(l.align_workspace_gpu, (float *)l.align_bit_weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr_gpu);
//convolve_gpu(state.input, l.weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad);
//hipDeviceSynchronize();
//check_error(status);
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
*/
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == HARD_MISH) activate_array_hard_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 0);
else if (l.activation == NORM_CHAN_SOFTMAX_MAXVAL) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 1);
else if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if (l.binary || l.xnor) swap_binary(&l);
//hipDeviceSynchronize();
return;
}
}
if (l.xnor) {
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
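// Dense convolution path (also used for XNOR training with binarized inputs/weights):
// cuDNN in FP16 or FP32 below, or the im2col + GEMM fallback in the #else branch.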
#ifdef CUDNN
//float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT
float alpha = 1, beta = 0;
//#ifdef CUDNN_HALF
//if (state.use_mixed_precision) {
int iteration_num = get_current_iteration(state.net); // (*state.net.seen) / (state.net.batch*state.net.subdivisions);
if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || ((iteration_num > 3 * state.net.burn_in) && state.net.loss_scale != 1)) &&
(l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && l.groups <= 1 && l.size > 1)
{
//printf("\n CUDNN_HALF!!! state.index = %d \n", state.index);
// Note: For improved performance it is advised to use beta[0] = 0.0.
// For Tensor Core: cudnnSetConvolutionMathType() where cudnnMathType_t mathType = CUDNN_TENSOR_OP_MATH;
// 1. or CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM and use CUDNN_DATA_HALF
// 2. or CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED
// More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
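// Grow the network-wide FP16 staging buffers (shared across layers) if this layer needs more space than seen so far.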
const size_t input16_size = l.batch*l.c*l.w*l.h;
const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w;
if (*state.net.max_input16_size < input16_size) {
//printf("\n input16_size: cur = %zu \t max = %zu \n", input16_size, *state.net.max_input16_size);
*state.net.max_input16_size = input16_size;
if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu);
assert(*state.net.max_input16_size > 0);
*state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size);
}
float *input16 = *state.net.input16_gpu;
if (*state.net.max_output16_size < output16_size) {
*state.net.max_output16_size = output16_size;
if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size);
}
float *output16 = *state.net.output16_gpu;
assert(input16_size > 0);
cuda_convert_f32_to_f16(state.input, input16_size, input16);
//fill_ongpu(output16_size / 2, 0, (float *)output16, 1);
CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
&alpha,
l.srcTensorDesc16,
input16,
l.weightDesc16,
l.weights_gpu16,
l.convDesc,
l.fw_algo16,
state.workspace,
l.workspace_size,
&beta,
l.dstTensorDesc16,
output16));
if (l.batch_normalize)
{
if (state.train && !state.net.adversarial) // Training
{
simple_copy_ongpu(l.outputs*l.batch / 2, output16, l.x_gpu);
//copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1);
//hipMemcpyAsync(l.x_gpu, output16, l.outputs*l.batch*sizeof(half), hipMemcpyDefault, get_cuda_stream());
float one = 1.0f;
float zero = 0.0f;
// Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth
// compared to FP32, it's just that the statistics and value adjustment should be done in FP32.
CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle(),
CUDNN_BATCHNORM_SPATIAL,
&one,
&zero,
l.normDstTensorDescF16,
l.x_gpu, // input
l.normDstTensorDescF16,
output16, // output
l.normTensorDesc,
l.scales_gpu, // input
l.biases_gpu, // input
.01,
l.rolling_mean_gpu, // input/output (should be FP32)
l.rolling_variance_gpu, // input/output (should be FP32)
.00001,
l.mean_gpu, // output (should be FP32) - optional cache to speedup cudnnBatchNormalizationBackward()
l.variance_gpu)); // output (should be FP32) - optional cache to speedup cudnnBatchNormalizationBackward()
cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
//forward_batchnorm_layer_gpu(l, state);
}
else // Detection
{
cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
}
}
else // BIAS only
{
cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
}
else {
//#else
/*
int input_nan_inf = is_nan_or_inf(state.input, l.inputs * l.batch);
printf("\n is_nan_or_inf(state.input) = %d \n", input_nan_inf);
if (input_nan_inf) getchar();
int weights_nan_inf = is_nan_or_inf(l.weights_gpu, l.nweights);
printf("\n is_nan_or_inf(l.weights_gpu) = %d \n", weights_nan_inf);
if (weights_nan_inf) getchar();
*/
CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
&alpha, //&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&beta, //&one,
l.dstTensorDesc,
l.output_gpu));
//hipDeviceSynchronize();
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
//#endif // CUDNN_HALF
}
#else
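// Without cuDNN: explicit im2col per image and group (skipped for 1x1 / stride 1 / dilation 1), then gemm_ongpu.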
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
int i, j;
int m = l.n / l.groups;
int k = l.size*l.size*l.c / l.groups;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
for (j = 0; j < l.groups; ++j) {
//float *im = state.input + i*l.c*l.h*l.w;
float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w;
float *a = l.weights_gpu + j*l.nweights / l.groups;
float *b = state.workspace;
float *c = l.output_gpu + (i*l.groups + j)*n*m;
if (l.size == 1 && l.stride == 1 && l.dilation == 1) {
b = im;
}
else {
//im2col_ongpu(im, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
im2col_gpu_ext(im, // input
l.c / l.groups, // input channels
l.h, l.w, // input size (h, w)
l.size, l.size, // kernel size (h, w)
l.pad * l.dilation, l.pad * l.dilation, // padding (h, w)
l.stride_y, l.stride_x, // stride (h, w)
l.dilation, l.dilation, // dilation (h, w)
state.workspace); // output
}
//gemm_ongpu(0, 0, m, n, k, 1., a, k, b, n, 1., c + i*m*n, n);
gemm_ongpu(0, 0, m, n, k, 1, a, k, b, n, 1, c, n);
}
}
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
#endif
//#ifndef CUDNN_HALF
//#endif // no CUDNN_HALF
if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == HARD_MISH) activate_array_hard_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 0);
else if (l.activation == NORM_CHAN_SOFTMAX_MAXVAL) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 1);
else if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
//hipDeviceSynchronize(); // for correct profiling of performance
if (state.net.try_fix_nan) {
fix_nan_and_inf(l.output_gpu, l.outputs*l.batch);
}
if(l.assisted_excitation && state.train) assisted_excitation_forward_gpu(l, state);
if (l.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
s.input = l.output_gpu;
forward_convolutional_layer_gpu(*(l.input_layer), s);
simple_copy_ongpu(l.outputs*l.batch, l.output_gpu, l.input_antialiasing_gpu);
simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.input_layer->output_gpu, l.output_gpu);
}
if (l.coordconv) {
coord_conv_gpu(l.output_gpu, l.outputs*l.batch, l.out_w, l.out_h, l.out_c, l.batch, 0);
}
}
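// Backward pass on GPU: activation gradient into l.delta_gpu, bias / batch-norm gradients,
// weight updates via cudnnConvolutionBackwardFilter (or GEMM without cuDNN),
// and the delta for the previous layer via cudnnConvolutionBackwardData (or GEMM + col2im).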
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
if (l.coordconv) {
coord_conv_gpu(l.delta_gpu, l.outputs*l.batch, l.out_w, l.out_h, l.out_c, l.batch, 1);
}
if (l.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.delta = l.delta_gpu; // s.delta will be returned to l.delta_gpu
s.input = l.input_antialiasing_gpu;
//if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.delta_gpu, l.input_layer->delta_gpu);
backward_convolutional_layer_gpu(*(l.input_layer), s);
simple_copy_ongpu(l.outputs*l.batch, l.input_antialiasing_gpu, l.output_gpu);
}
if(state.net.try_fix_nan) constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == HARD_MISH) gradient_array_hard_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == NORM_CHAN_SOFTMAX || l.activation == NORM_CHAN_SOFTMAX_MAXVAL) gradient_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.delta_gpu);
else if (l.activation == NORM_CHAN) gradient_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.delta_gpu);
else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if (!l.batch_normalize)
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
//#ifndef CUDNN_HALF
//if(l.batch_normalize){
// backward_batchnorm_layer_gpu(l, state);
//} else {
// //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
//}
//#endif // no CUDNN_HALF
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1.f;
float alpha = 1, beta = 0;
//#ifdef CUDNN_HALF
int iteration_num = get_current_iteration(state.net); //(*state.net.seen) / (state.net.batch*state.net.subdivisions);
if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || ((iteration_num > 3 * state.net.burn_in) && state.net.loss_scale != 1)) &&
(l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && l.groups <= 1 && l.size > 1)
{
const size_t input16_size = l.batch*l.c*l.w*l.h;
const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h;
if (*state.net.max_input16_size < input16_size) {
*state.net.max_input16_size = input16_size;
if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu);
assert(*state.net.max_input16_size > 0);
*state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size);
}
float *input16 = *state.net.input16_gpu;
if (*state.net.max_output16_size < delta16_size) {
*state.net.max_output16_size = delta16_size;
if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size);
}
float *delta16 = *state.net.output16_gpu;
assert(input16_size > 0);
assert(delta16_size > 0);
cuda_convert_f32_to_f16(state.input, input16_size, input16);
cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16);
if (l.batch_normalize) {
//if (!state.train) {
// l.mean_gpu = l.rolling_mean_gpu;
// l.variance_gpu = l.rolling_variance_gpu;
//}
float one = 1.0f;
float zero = 0.0f;
CHECK_CUDNN(cudnnBatchNormalizationBackward(cudnn_handle(),
CUDNN_BATCHNORM_SPATIAL,
&one,
&zero,
&one,
&one,
l.normDstTensorDescF16,
l.x_gpu, // input (input in BN-forward-inference)
l.normDstTensorDescF16,
delta16, // input
l.normDstTensorDescF16,
l.output_gpu, //l.x_norm_gpu, // output (new delta)
l.normTensorDesc,
l.scales_gpu, // input (should be FP32)
l.scale_updates_gpu, // output (should be FP32)
l.bias_updates_gpu, // output (should be FP32)
.00001,
l.mean_gpu, // input (should be FP32)
l.variance_gpu)); // input (should be FP32)
simple_copy_ongpu(l.outputs*l.batch / 2, l.output_gpu, delta16);
//copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1);
//hipMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), hipMemcpyDefault, get_cuda_stream());
}
else
{
//backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
// convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16
// get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16)
// calculate conv weight updates
// Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum
// so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m
assert((l.nweights) > 0);
cuda_convert_f32_to_f16(l.weight_updates_gpu, l.nweights, l.weight_updates_gpu16);
if (!state.net.adversarial && !l.train_only_bn) {
CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc16,
input16, //state.input,
l.ddstTensorDesc16,
delta16, //l.delta_gpu,
l.convDesc,
l.bf_algo16,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc16,
l.weight_updates_gpu16)); // l.weight_updates_gpu);
cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.nweights, l.weight_updates_gpu);
}
if (state.delta) {
if (l.binary || l.xnor) swap_binary(&l);
// http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
// calculate delta for the next layer
// convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16
// get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16)
CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(),
&alpha,
l.weightDesc16,
l.weights_gpu16, //l.weights_gpu,
l.ddstTensorDesc16,
delta16, //l.delta_gpu,
l.convDesc,
l.bd_algo16,
state.workspace,
l.workspace_size,
&beta,
l.dsrcTensorDesc16,
input16)); // state.delta);
cuda_convert_f16_to_f32(input16, input16_size, state.delta);
if (l.binary || l.xnor) swap_binary(&l);
if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
}
else {
//#else // CUDNN_HALF
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
}
if (!state.net.adversarial && !l.train_only_bn) {
float *old_input = state.input;
/*
if (l.reverse) {
if (*state.net.max_output16_size < l.inputs*l.batch) {
*state.net.max_output16_size = l.inputs*l.batch;
if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = cuda_make_array(NULL, *state.net.max_output16_size);
}
float clip = 0.0;
float divider = 1.0;
float abs_add = 1.0;
mult_inverse_array_gpu(state.input, *state.net.output16_gpu, l.inputs*l.batch, l.reverse, divider, clip, abs_add);
state.input = *state.net.output16_gpu;
}
*/
// calculate conv weight updates
// if used: beta=1 then loss decreases faster
CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu));
state.input = old_input;
}
if (state.delta) {
if (l.binary || l.xnor) swap_binary(&l);
float *old_weights = l.weights_gpu;
/*
if (l.reverse) {
if (*state.net.max_output16_size < l.nweights) {
*state.net.max_output16_size = l.nweights;
if (*state.net.output16_gpu && *state.net.max_output16_size > 0) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = cuda_make_array(NULL, l.nweights);
}
float clip = 0.0;
float divider = 1.0;
float abs_add = 1.0;
mult_inverse_array_gpu(l.weights_gpu, *state.net.output16_gpu, l.nweights, l.reverse, divider, clip, abs_add);
l.weights_gpu = *state.net.output16_gpu;
}
*/
// http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
// calculate delta for the next layer
CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta));
l.weights_gpu = old_weights;
if (l.binary || l.xnor) swap_binary(&l);
if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
}
//#endif // CUDNN_HALF
#else // CUDNN
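// Without cuDNN: per-group GEMMs accumulate weight updates from the im2col buffer,
// and a second GEMM followed by col2im scatters the delta back to the previous layer.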
if (l.batch_normalize) {
backward_batchnorm_layer_gpu(l, state);
}
int m = l.n / l.groups;
int n = l.size*l.size*l.c / l.groups;
int k = l.out_w*l.out_h;
int i, j;
for(i = 0; i < l.batch; ++i){
for (j = 0; j < l.groups; ++j) {
float * a = l.delta_gpu + (i*l.groups + j)*m*k;
float * b = state.workspace;
float * c = l.weight_updates_gpu + j*l.nweights / l.groups;
float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w;
if (!state.net.adversarial && !l.train_only_bn) {
//im2col_ongpu(im, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
im2col_gpu_ext(im, // input
l.c / l.groups, // input channels
l.h, l.w, // input size (h, w)
l.size, l.size, // kernel size (h, w)
l.pad * l.dilation, l.pad * l.dilation, // padding (h, w)
l.stride_y, l.stride_x, // stride (h, w)
l.dilation, l.dilation, // dilation (h, w)
state.workspace); // output
//gemm_ongpu(0, 1, m, n, k, 1, a + i*m*k, k, b, k, 1, c, n);
gemm_ongpu(0, 1, m, n, k, 1, a, k, b, k, 1, c, n);
}
if (state.delta) {
if (l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu + j*l.nweights / l.groups;
float * b = l.delta_gpu + (i*l.groups + j)*m*k;
float * c = state.workspace;
//gemm_ongpu(1, 0, n, k, m, 1, a, n, b + i*k*m, k, 0, c, k);
gemm_ongpu(1, 0, n, k, m, 1, a, n, b, k, 0, c, k);
float *delta = state.delta + (i*l.groups + j)*l.c / l.groups*l.h*l.w;
//col2im_ongpu(state.workspace, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, delta);
col2im_gpu_ext(
state.workspace, // input
l.c / l.groups, // input channels
l.h, l.w, // input size (h, w)
l.size, l.size, // kernel size (h, w)
l.pad * l.dilation, l.pad * l.dilation, // padding size (h, w)
l.stride_y, l.stride_x, // stride size (h, w)
l.dilation, l.dilation, // dilation size (h, w)
delta); // output (delta)
if (l.binary || l.xnor) {
swap_binary(&l);
}
if (l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
}
#endif
if (state.net.try_fix_nan) {
if (state.delta) {
reset_nan_and_inf(state.delta, l.inputs * l.batch);
}
int size = l.nweights;
reset_nan_and_inf(l.weight_updates_gpu, size);
fix_nan_and_inf(l.weights_gpu, size);
}
}
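// Assisted-excitation helpers: calc_avg_activation averages the output over channels per pixel,
// assisted_activation adds alpha * gt * avg to every channel, assisted_activation2 damps pixels where gt == 0.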
__global__ void calc_avg_activation_kernel(float *src, float *dst, int size, int channels, int batches)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int xy = i % size;
int b = i / size;
if (i < size*batches) {
dst[i] = 0;
for (int c = 0; c < channels; ++c) {
dst[i] += src[xy + size*(c + channels*b)];
}
dst[i] = dst[i] / channels;
}
}
void calc_avg_activation_gpu(float *src, float *dst, int size, int channels, int batches)
{
const int num_blocks = get_number_of_blocks(size*batches, BLOCK);
calc_avg_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (src, dst, size, channels, batches);
}
__global__ void assisted_activation_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int xy = i % size;
int b = i / size;
if (b < batches) {
for (int c = 0; c < channels; ++c) {
output[xy + size*(c + channels*b)] += alpha * gt_gpu[i] * a_avg_gpu[i];
//output[xy + size*(c + channels*b)] += gt_gpu[i] * a_avg_gpu[i];
//output[xy + size*(c + channels*b)] += gt_gpu[i] * output[xy + size*(c + channels*b)];
//output[xy + size*(c + channels*b)] = a_avg_gpu[i];
}
}
}
void assisted_activation_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
const int num_blocks = get_number_of_blocks(size*batches, BLOCK);
assisted_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches);
}
__global__ void assisted_activation2_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int xy = i % size;
int b = i / size;
float beta = 1 - alpha;
if (b < batches) {
for (int c = 0; c < channels; ++c) {
if(gt_gpu[i] == 0)
output[xy + size*(c + channels*b)] *= beta;
}
}
}
void assisted_activation2_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
const int num_blocks = get_number_of_blocks(size*batches, BLOCK);
assisted_activation2_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches);
}
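// Assisted excitation: build a per-pixel ground-truth mask from the truth boxes on the CPU, push it to the GPU,
// then add alpha * mask * channel-averaged activation to every channel; alpha decays with the training iteration.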
void assisted_excitation_forward_gpu(convolutional_layer l, network_state state)
{
const int iteration_num = get_current_iteration(state.net); //(*state.net.seen) / (state.net.batch*state.net.subdivisions);
// epoch
//const float epoch = (float)(*state.net.seen) / state.net.train_images_num;
// calculate alpha
//const float alpha = (1 + cos(3.141592 * iteration_num)) / (2 * state.net.max_batches);
//const float alpha = (1 + cos(3.141592 * epoch)) / (2 * state.net.max_batches);
float alpha = (1 + cos(3.141592 * iteration_num / state.net.max_batches)) / 2;
//float alpha = (1 + cos(3.141592 * iteration_num / state.net.max_batches));
if (l.assisted_excitation == 1) {
if (iteration_num > state.net.max_batches / 2) return;
}
else {
if (iteration_num < state.net.burn_in) return;
else
if (iteration_num > l.assisted_excitation) return;
else
alpha = (1 + cos(3.141592 * iteration_num / (state.net.burn_in + l.assisted_excitation))) / 2; // from 1 to 0
}
//printf("\n epoch = %f, alpha = %f, seen = %d, max_batches = %d, train_images_num = %d \n",
// epoch, alpha, (*state.net.seen), state.net.max_batches, state.net.train_images_num);
//const int size = l.outputs * l.batch;
float *a_avg = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float));
float *gt = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float));
int b;
int w, h;
l.max_boxes = state.net.num_boxes;
l.truths = l.max_boxes*(4 + 1);
int num_truth = l.batch*l.truths;
float *truth_cpu = (float *)calloc(num_truth, sizeof(float));
cuda_pull_array(state.truth, truth_cpu, num_truth);
//hipStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(hipPeekAtLastError());
for (b = 0; b < l.batch; ++b)
{
// calculate G
int t;
for (t = 0; t < state.net.num_boxes; ++t) {
box truth = float_to_box_stride(truth_cpu + t*(4 + 1) + b*l.truths, 1);
if (!truth.x) break; // continue;
float beta = 0;
//float beta = 1 - alpha; // from 0 to 1
float dw = (1 - truth.w) * beta;
float dh = (1 - truth.h) * beta;
//printf(" alpha = %f, beta = %f, truth.w = %f, dw = %f, tw+dw = %f, l.out_w = %d \n", alpha, beta, truth.w, dw, truth.w+dw, l.out_w);
int left = floorf((truth.x - (dw + truth.w) / 2) * l.out_w);
int right = ceilf((truth.x + (dw + truth.w) / 2) * l.out_w);
int top = floorf((truth.y - (dh + truth.h) / 2) * l.out_h);
int bottom = ceilf((truth.y + (dh + truth.h) / 2) * l.out_h);
if (left < 0) left = 0;
if (top < 0) top = 0;
if (right > l.out_w) right = l.out_w;
if (bottom > l.out_h) bottom = l.out_h;
for (w = left; w < right; w++) { // exclusive upper bound, matching the h loop and the gt buffer extent
for (h = top; h < bottom; h++) {
gt[w + l.out_w * h + l.out_w*l.out_h*b] = 1;
}
}
}
}
cuda_push_array(l.gt_gpu, gt, l.out_w * l.out_h * l.batch);
//hipStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(hipPeekAtLastError());
// calc avg_output on GPU - for whole batch
calc_avg_activation_gpu(l.output_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);
//hipStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(hipPeekAtLastError());
// calc new output
//assisted_activation2_gpu(1, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch); // AE3: gt increases (beta = 1 - alpha = 0)
//assisted_activation2_gpu(alpha, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);
assisted_activation_gpu(alpha, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);
//hipStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(hipPeekAtLastError());
/*
for (b = 0; b < l.batch; ++b)
{
// calculate average A
for (w = 0; w < l.out_w; w++) {
for (h = 0; h < l.out_h; h++) {
for (c = 0; c < l.out_c; c++) {
a_avg[w + l.out_w*(h + l.out_h*b)] += l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))];
}
a_avg[w + l.out_w*(h + l.out_h*b)] /= l.out_c; // a_avg / d
}
}
}
// change activation
for (b = 0; b < l.batch; ++b)
{
for (w = 0; w < l.out_w; w++) {
for (h = 0; h < l.out_h; h++) {
for (c = 0; c < l.out_c; c++)
{
// a = a + alpha(t) + e(c,i,j) = a + alpha(t) + g(i,j) * avg_a(i,j) / channels
l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))] +=
alpha *
g[w + l.out_w*(h + l.out_h*b)] *
a_avg[w + l.out_w*(h + l.out_h*b)];
//l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))] =
// alpha * g[w + l.out_w*(h + l.out_h*b)] * a_avg[w + l.out_w*(h + l.out_h*b)];
}
}
}
}
*/
if (0) // visualize ground truth
{
#ifdef OPENCV
cuda_pull_array(l.output_gpu, l.output, l.outputs * l.batch);
hipStreamSynchronize(get_cuda_stream());
CHECK_CUDA(hipPeekAtLastError());
for (b = 0; b < l.batch; ++b)
{
printf(" Assisted Excitation alpha = %f \n", alpha);
image img = float_to_image(l.out_w, l.out_h, 1, &gt[l.out_w*l.out_h*b]);
char buff[100];
sprintf(buff, "a_excitation_gt_%d", b);
show_image_cv(img, buff);
//image img2 = float_to_image(l.out_w, l.out_h, 1, &l.output[l.out_w*l.out_h*l.out_c*b]);
image img2 = float_to_image_scaled(l.out_w, l.out_h, 1, &l.output[l.out_w*l.out_h*l.out_c*b]);
char buff2[100];
sprintf(buff2, "a_excitation_output_%d", b);
show_image_cv(img2, buff2);
/*
int c = l.out_c;
if (c > 4) c = 4;
image img3 = float_to_image(l.out_w, l.out_h, c, &l.output[l.out_w*l.out_h*l.out_c*b]);
image dc = collapse_image_layers(img3, 1);
char buff3[100];
sprintf(buff3, "a_excitation_act_collapsed_%d", b);
show_image_cv(dc, buff3);
*/
wait_key_cv(5);
}
wait_until_press_key_cv();
#endif // OPENCV
}
free(truth_cpu);
free(gt);
free(a_avg);
}
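// Transfer layer parameters between GPU and host: pull = GPU -> CPU (async), push = CPU -> GPU.
// Covers weights, biases, batch-norm statistics and, when present, their updates and Adam moments.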
void pull_convolutional_layer(convolutional_layer l)
{
cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
cuda_pull_array_async(l.biases_gpu, l.biases, l.n);
if (l.weight_updates_gpu) cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights);
if (l.bias_updates_gpu) cuda_pull_array_async(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_pull_array_async(l.scales_gpu, l.scales, l.n);
cuda_pull_array_async(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_pull_array_async(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
if (l.adam){
cuda_pull_array_async(l.m_gpu, l.m, l.nweights);
cuda_pull_array_async(l.v_gpu, l.v, l.nweights);
}
CHECK_CUDA(hipPeekAtLastError());
hipStreamSynchronize(get_cuda_stream());
}
void push_convolutional_layer(convolutional_layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.nweights);
#ifdef CUDNN_HALF
assert(l.nweights > 0);
cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, l.weights_gpu16);
#endif
cuda_push_array(l.biases_gpu, l.biases, l.n);
if (l.train) {
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
}
if (l.batch_normalize){
cuda_push_array(l.scales_gpu, l.scales, l.n);
cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
if (l.adam){
cuda_push_array(l.m_gpu, l.m, l.nweights);
cuda_push_array(l.v_gpu, l.v, l.nweights);
}
CHECK_CUDA(hipPeekAtLastError());
}
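// Parameter update on GPU: optional weight deformation (rotate / sway / stretch), NaN/Inf cleanup,
// optional gradient centralization, then Adam or SGD with momentum; the learning rate is divided
// by loss_scale to undo mixed-precision loss scaling.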
void update_convolutional_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay, float loss_scale)
{
/*
for (int angle = 0; angle < 360; angle++) {
printf(" angle = %d \n", angle);
smooth_rotate_weights_kernel(l.weights_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, angle, 0);
cuda_pull_array(l.weight_deform_gpu, l.weights, l.nweights);
visualize_convolutional_layer(l, "weights", NULL);
wait_key_cv(10);
}
*/
if (l.deform) {
//for (l.angle = 0; l.angle < 360; l.angle += 1)
//{
//stretch_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, l.angle/180, 1);
//else simple_copy_ongpu(l.nweights, l.weight_updates_gpu, l.weight_deform_gpu);
if (l.rotate) rotate_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, 1);
else if (l.sway) sway_and_flip_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, l.angle, 1);
else if (l.stretch) stretch_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, 0, 1);
else if (l.stretch_sway) stretch_sway_flip_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, l.angle, 1);
//simple_copy_ongpu(l.nweights, l.weight_updates_gpu, l.weight_deform_gpu);
reduce_and_expand_array_gpu(l.weight_deform_gpu, l.weight_updates_gpu, l.nweights, 4);
//printf(" angle = %f \n", l.angle);
//cuda_pull_array(l.weight_deform_gpu, l.weights, l.nweights);
//visualize_convolutional_layer(l, "weights", NULL);
//wait_key_cv(10);
//}
}
// Loss scale for Mixed-Precision on Tensor-Cores
float learning_rate = learning_rate_init*l.learning_rate_scale / loss_scale;
//float momentum = a.momentum;
//float decay = a.decay;
//int batch = a.batch;
reset_nan_and_inf(l.weight_updates_gpu, l.nweights);
fix_nan_and_inf(l.weights_gpu, l.nweights);
// Gradient Centralization
if (l.grad_centr && l.batch_normalize) {
// weights[filters][channels][height][width]
// for(filters) w[f] = w[f] - mean(w[c][h][w])
gradient_centralization_gpu(l.size, l.size, l.c / l.groups, l.n, l.weight_updates_gpu);
}
if (l.adam) {
//adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t);
adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.nweights, batch, l.t);
adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t);
if (l.scales_gpu) {
adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t);
}
}
else {
//axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
//axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
//scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);
float *old_weight_updates_gpu = l.weight_updates_gpu;
if (l.reverse) {
float clip = 0.0;
float divider = 1.0;
float abs_add = 1.0;
mult_inverse_array_gpu(l.weight_updates_gpu, l.output_gpu, l.inputs*l.batch, l.reverse, divider, clip, abs_add);
l.weight_updates_gpu = l.output_gpu;
}
axpy_ongpu(l.nweights, -decay*batch*loss_scale, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
l.weight_updates_gpu = old_weight_updates_gpu;
scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);
axpy_ongpu(l.n, learning_rate / batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1);
if (l.scales_gpu) {
axpy_ongpu(l.n, learning_rate / batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1);
}
}
if (l.deform) {
//for (l.angle = 0; l.angle < 360; l.angle += 4)
//{
expand_array_gpu(l.weights_gpu, l.weight_deform_gpu, l.nweights, 4);
//simple_copy_ongpu(l.nweights, l.weight_deform_gpu, l.weights_gpu);
if (l.rotate) rotate_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, 0);
else if (l.sway) sway_and_flip_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, l.angle, 0);
else if (l.stretch) stretch_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, 0, 0);
else if (l.stretch_sway) stretch_sway_flip_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, l.angle, 0);
//printf(" angle = %f, reverse = %d \n", l.angle, 0);
//cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
//visualize_convolutional_layer(l, "weights", NULL);
//wait_key_cv(10);
//}
}
if (l.clip) {
constrain_ongpu(l.nweights, l.clip, l.weights_gpu, 1);
}
}
/*
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); // wu = wu - w*decay*batch
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1); // w = w + wu*lr/batch
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1); // wu = wu*momentum // wu = (wu - w*decay*batch)*momentum
// w = w + (wu - w*decay*batch)*lr/batch = w + wu*lr/batch - w*decay*lr = w*(1-decay*lr) + wu*lr/batch
//wu_prev = (wu_old - w_old*decay*batch)*momentum
//weights_update = weights_update_new + (weights_update_old - weights_old*decay*batch)*momentum - weights_new*decay*batch =
// = weights_update_new + weights_update_old*momentum - weights_old*decay*batch*momentum - weights_new*decay*batch
// = weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch
//------------- RESULT --------------
// weights_update = weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch
//-----------------------------------
// weights_newest = weights_new + (weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch)*lr/batch
// = weights_new + weights_update_new*lr/batch + weights_update_old*momentum*lr/batch - weights_old*momentum*decay*batch*lr/batch - weights_new*decay*batch*lr/batch
// = weights_new + weights_update_new*lr/batch + weights_update_old*momentum*lr/batch - weights_old*momentum*decay*lr - weights_new*decay*lr
// = weights_new*(1 - decay*lr) - weights_old*momentum*decay*lr + (weights_update_new + weights_update_old*momentum)*lr/batch
//------------- RESULT --------------
// weights_newest = weights_new*(1 - decay*lr) - weights_old*momentum*(decay*lr) + (weights_update_new + weights_update_old*momentum)*lr/batch =
// = weights_new - (weights_new + weights_old*momentum)*decay*lr + (weights_update_new + weights_update_old*momentum)*lr / batch
//-----------------------------------
}
}
*/
| 48b5a465f85ab4b6cc15069e5447ec0a2a68fde4.cu | #include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "dark_cuda.h"
#include "box.h"
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
binarize_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >>>(x, n, binary);
CHECK_CUDA(cudaPeekAtLastError());
}
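// Binarize the input per spatial position: each of the n slices at that position becomes +/- the mean |x| over those slices.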
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += fabs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
binarize_input_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >>>(input, n, size, binary);
CHECK_CUDA(cudaPeekAtLastError());
}
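// Binarize weights per filter: each weight becomes +/- mean(|w|) over its own filter.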
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for (i = 0; i < size; ++i) {
mean += fabs(weights[f*size + i]);
}
mean = mean / size;
for (i = 0; i < size; ++i) {
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(weights, n, size, binary);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void set_zero_kernel(float *src, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) src[i] = 0;
}
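// Butterfly (shuffle-XOR) warp reduction: every lane ends up holding the sum over the whole warp.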
__inline__ __device__
float warpAllReduceSum(float val) {
for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
#if CUDART_VERSION >= 9000
val += __shfl_xor_sync(0xffffffff, val, mask);
#else
val += __shfl_xor(val, mask);
#endif
return val;
}
// only if (size % 32 == 0)
__global__ void reduce_kernel(float *weights, int n, int size, float *mean_arr_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int f = i / size;
if (f >= n) return;
float warp_mean = warpAllReduceSum(fabs(weights[i]));
if(i % 32 == 0)
atomicAdd(&mean_arr_gpu[f], warp_mean / size);
}
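// Writes +/- mean weights using the per-filter means accumulated by reduce_kernel (warp sums divided by size).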
__global__ void binarize_weights_mean_kernel(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int f = i / size;
if (f >= n) return;
float mean = mean_arr_gpu[f];
binary[i] = (weights[i] > 0) ? mean : -mean;
}
void fast_binarize_weights_gpu(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
if (size % 32 == 0) {
size_t gridsize = n * size;
const int num_blocks = get_number_of_blocks(gridsize, BLOCK);// gridsize / BLOCK + 1;
set_zero_kernel << <(n/BLOCK + 1), BLOCK, 0, get_cuda_stream() >> > (mean_arr_gpu, n);
reduce_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, mean_arr_gpu);
binarize_weights_mean_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, binary, mean_arr_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
else {
binarize_weights_gpu(weights, n, size, binary);
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) {
cuda_f32_to_f16 <<< get_number_of_blocks(size, BLOCK), BLOCK, 0, get_cuda_stream() >>> (input_f32, size, (half *)output_f16);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) {
cuda_f16_to_f32 <<< get_number_of_blocks(size, BLOCK), BLOCK, 0, get_cuda_stream() >>> ((half *)input_f16, size, output_f32);
CHECK_CUDA(cudaPeekAtLastError());
}
half *cuda_make_f16_from_f32_array(float *src, size_t n)
{
half *dst16;
size_t size = sizeof(half)*n;
CHECK_CUDA(cudaMalloc((void **)&dst16, size));
if (src) {
assert(n > 0);
cuda_convert_f32_to_f16(src, n, (float *)dst16);
}
if (!dst16) error("Cuda malloc failed", DARKNET_LOC);
return dst16;
}
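// GPU forward pass: XNOR bit-packed inference path, cuDNN convolution (optionally FP16 on Tensor Cores),
// or the im2col + GEMM fallback, followed by the activation.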
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
if (l.train == 0) state.train = 0;
if (l.stream >= 0) {
switch_stream(l.stream);
}
if (l.wait_stream_id >= 0) {
wait_stream(l.wait_stream_id);
}
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
if (!l.align_bit_weights_gpu || state.train) {
//binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu);
fast_binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu, l.mean_arr_gpu);
}
if (l.align_bit_weights_gpu && !state.train && l.c >= 32 && l.stride_x == l.stride_y)
{
//return;
//cudaError_t status = cudaSuccess;
//int input_size = l.c*l.h*l.w*l.batch;
int m = l.n / l.groups;
int k = l.size*l.size*l.c / l.groups;
int n = l.out_w*l.out_h;
//float * a = l.weights_gpu;
// int i, j;
// for(i = 0; i < l.batch; ++i){
// for (j = 0; j < l.groups; ++j) {
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
//size_t t_intput_size = new_ldb * n;
//size_t t_bit_input_size = t_intput_size / 8;// +1;
if (l.c % 32 == 0)
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h));
//float *intput_cpu = (float *)calloc(l.inputs, sizeof(float));
// state.input
//cudaMemcpy(intput_cpu, state.input, l.inputs * sizeof(float), cudaMemcpyDefault);
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
//size_t t_intput_size = new_ldb * l.bit_align;// n;
//size_t t_bit_input_size = t_intput_size / 8;// +1;
const int new_c = l.c / 32;
//float *re_packed_input = (float *)calloc(l.c * l.w * l.h, sizeof(float));
//uint32_t *bin_re_packed_input = (uint32_t *)calloc(new_c * l.w * l.h + 1, sizeof(uint32_t));
// float32x4 by channel (as in cuDNN)
//repack_input(intput_cpu, re_packed_input, l.w, l.h, l.c);
// 32 x floats -> 1 x uint32_t
//float_to_bit(re_packed_input, (uint8_t *)bin_re_packed_input, l.c * l.w * l.h);
//cudaDeviceSynchronize();
//start_timer();
repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c);
//repack_input_gpu(state.input, state.workspace, l.w, l.h, l.c);
// 32 x floats -> 1 x uint32_t
//float_to_bit_gpu(state.workspace, (unsigned char *)l.align_workspace_gpu, l.c * l.w * l.h);// l.align_workspace_size);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("repack_input_gpu + float_to_bit_gpu");
//free(re_packed_input);
// slow - convolution the packed inputs and weights: float x 32 by channel (as in cuDNN)
//convolution_repacked((uint32_t *)bin_re_packed_input, (uint32_t *)l.align_bit_weights, l.output,
// l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr);
// // then exit from if()
//float *b = state.workspace;
//float *b = (float *)calloc(100 * 1024 * 1024, sizeof(float));
//float *c = l.output;
//memset(c, 0, l.outputs * sizeof(float));
//im2col_cpu_custom((float *)bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, b);
//cudaMemcpy(l.align_workspace_gpu, bin_re_packed_input, (new_c * l.w * l.h + 1) * sizeof(uint32_t), cudaMemcpyDefault);
//start_timer();
im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_ongpu");
//free(bin_re_packed_input);
int new_k = l.size*l.size*l.c / 32;
// good for (l.c == 64)
//gemm_nn_bin_32bit_packed(m, n, new_k, 1,
// l.align_bit_weights, l.new_lda/32,
// b, n,
// c, n, l.mean_arr);
// // then exit from if()
//size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
//size_t t_intput_size = new_ldb * l.bit_align;// n;
//size_t t_bit_input_size = t_intput_size / 8;// +1;
//char *t_bit_input = (char *)calloc(t_bit_input_size, sizeof(char));
//transpose_uint32((uint32_t *)b, (uint32_t *)t_bit_input, new_k, n, n, new_ldb);
//cudaMemcpy(l.transposed_align_workspace_gpu, t_bit_input, t_bit_input_size * sizeof(char), cudaMemcpyDefault);
//cudaMemcpy(state.workspace, b, t_bit_input_size * sizeof(char), cudaMemcpyDefault);
//printf("\n n = %d, n % 32 = %d, new_ldb = %d, new_ldb % 32 = %d \n", n, n % 32, new_ldb, new_ldb % 32);
//start_timer();
transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("transpose_uint32_gpu");
//cudaDeviceSynchronize();
//stop_timer_and_show_name("repack_input_gpu_bin + im2col_ongpu + transpose_uint32_gpu_2");
//start_timer();
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu");
// the main GEMM function
//gemm_nn_custom_bin_mean_transposed(m, n, k, 1, (uint8_t *)l.align_bit_weights, new_ldb, (uint8_t *)t_bit_input, new_ldb, c, n, l.mean_arr);
//add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
//cudaMemcpy(l.output_gpu, l.output, l.outputs * sizeof(float), cudaMemcpyDefault);
// // alternative GEMM
//gemm_nn_bin_transposed_32bit_packed(m, n, new_k, 1,
// l.align_bit_weights, l.new_lda/32,
// t_bit_input, new_ldb / 32,
// c, n, l.mean_arr);
//free(t_bit_input);
//free(b);
}
else
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//cudaDeviceSynchronize();
int i = 0;
/*
// if (l.stride == 1 && l.c >= 256 && l.size > 1)
if (l.stride == 1 && l.c >= 1024 && l.size > 1 && 0)// && l.w >= 13) // disabled
{
// stride=1 only
//start_timer();
im2col_align_bin_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_bin_ongpu");
}
else*/
{
//start_timer();
im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_ongpu");
//getchar();
// should be optimized
//start_timer();
float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("float_to_bit_gpu");
}
//start_timer();
transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("transpose_bin_gpu");
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_ongpu + float_to_bit_gpu + transpose_bin_gpu");
// should be optimized
//if(0) {//if (k > 1000) { // sequentially input-shared - BAD
// gemm_nn_custom_bin_mean_transposed_sequentially_gpu(m, n, k,
// (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu);
//}
//else { // coalescing & weights-shared-memory - GOOD
//start_timer();
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu");
//}
//cudaDeviceSynchronize();
//check_error(status);
//getchar();
}
/*
{
float_to_bit_gpu(state.input, (unsigned char *)l.align_workspace_gpu, input_size);
convolve_bin_gpu(l.align_workspace_gpu, (float *)l.align_bit_weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr_gpu);
//convolve_gpu(state.input, l.weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad);
//cudaDeviceSynchronize();
//check_error(status);
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
*/
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == HARD_MISH) activate_array_hard_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 0);
else if (l.activation == NORM_CHAN_SOFTMAX_MAXVAL) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 1);
else if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if (l.binary || l.xnor) swap_binary(&l);
//cudaDeviceSynchronize();
return;
}
}
if (l.xnor) {
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
#ifdef CUDNN
//float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT
float alpha = 1, beta = 0;
//#ifdef CUDNN_HALF
//if (state.use_mixed_precision) {
int iteration_num = get_current_iteration(state.net); // (*state.net.seen) / (state.net.batch*state.net.subdivisions);
if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || (iteration_num > 3 * state.net.burn_in) && state.net.loss_scale != 1) &&
(l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && l.groups <= 1 && l.size > 1)
{
//printf("\n CUDNN_HALF!!! state.index = %d \n", state.index);
// Note: For improved performance it is advised to use beta[0] = 0.0.
// For Tensor Core: cudnnSetConvolutionMathType() where cudnnMathType_t mathType = CUDNN_TENSOR_OP_MATH;
// 1. or CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM and use CUDNN_DATA_HALF
// 2. or CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED
// More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
const size_t input16_size = l.batch*l.c*l.w*l.h;
const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w;
if (*state.net.max_input16_size < input16_size) {
//printf("\n input16_size: cur = %zu \t max = %zu \n", input16_size, *state.net.max_input16_size);
*state.net.max_input16_size = input16_size;
if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu);
assert(*state.net.max_input16_size > 0);
*state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size);
}
float *input16 = *state.net.input16_gpu;
if (*state.net.max_output16_size < output16_size) {
*state.net.max_output16_size = output16_size;
if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size);
}
float *output16 = *state.net.output16_gpu;
assert(input16_size > 0);
cuda_convert_f32_to_f16(state.input, input16_size, input16);
//fill_ongpu(output16_size / 2, 0, (float *)output16, 1);
CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
&alpha,
l.srcTensorDesc16,
input16,
l.weightDesc16,
l.weights_gpu16,
l.convDesc,
l.fw_algo16,
state.workspace,
l.workspace_size,
&beta,
l.dstTensorDesc16,
output16));
if (l.batch_normalize)
{
if (state.train && !state.net.adversarial) // Training
{
simple_copy_ongpu(l.outputs*l.batch / 2, output16, l.x_gpu);
//copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1);
//cudaMemcpyAsync(l.x_gpu, output16, l.outputs*l.batch*sizeof(half), cudaMemcpyDefault, get_cuda_stream());
float one = 1.0f;
float zero = 0.0f;
// Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth
// compared to FP32; the statistics and value adjustment should still be done in FP32.
CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle(),
CUDNN_BATCHNORM_SPATIAL,
&one,
&zero,
l.normDstTensorDescF16,
l.x_gpu, // input
l.normDstTensorDescF16,
output16, // output
l.normTensorDesc,
l.scales_gpu, // input
l.biases_gpu, // input
.01,
l.rolling_mean_gpu, // input/output (should be FP32)
l.rolling_variance_gpu, // input/output (should be FP32)
.00001,
l.mean_gpu, // output (should be FP32) - optional cache to speedup cudnnBatchNormalizationBackward()
l.variance_gpu)); // output (should be FP32) - optional cache to speedup cudnnBatchNormalizationBackward()
cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
//forward_batchnorm_layer_gpu(l, state);
}
else // Detection
{
cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
}
}
else // BIAS only
{
cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
}
else {
//#else
/*
int input_nan_inf = is_nan_or_inf(state.input, l.inputs * l.batch);
printf("\n is_nan_or_inf(state.input) = %d \n", input_nan_inf);
if (input_nan_inf) getchar();
int weights_nan_inf = is_nan_or_inf(l.weights_gpu, l.nweights);
printf("\n is_nan_or_inf(l.weights_gpu) = %d \n", weights_nan_inf);
if (weights_nan_inf) getchar();
*/
CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
&alpha, //&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&beta, //&one,
l.dstTensorDesc,
l.output_gpu));
//cudaDeviceSynchronize();
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
//#endif // CUDNN_HALF
}
#else
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
int i, j;
int m = l.n / l.groups;
int k = l.size*l.size*l.c / l.groups;
int n = l.out_w*l.out_h;
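// GEMM dimensions per group: output C[m x n] = weights A[m x k] * im2col(input) B[k x n]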
for(i = 0; i < l.batch; ++i){
for (j = 0; j < l.groups; ++j) {
//float *im = state.input + i*l.c*l.h*l.w;
float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w;
float *a = l.weights_gpu + j*l.nweights / l.groups;
float *b = state.workspace;
float *c = l.output_gpu + (i*l.groups + j)*n*m;
if (l.size == 1 && l.stride == 1 && l.dilation == 1) {
b = im;
}
else {
//im2col_ongpu(im, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
im2col_gpu_ext(im, // input
l.c / l.groups, // input channels
l.h, l.w, // input size (h, w)
l.size, l.size, // kernel size (h, w)
l.pad * l.dilation, l.pad * l.dilation, // padding (h, w)
l.stride_y, l.stride_x, // stride (h, w)
l.dilation, l.dilation, // dilation (h, w)
state.workspace); // output
}
//gemm_ongpu(0, 0, m, n, k, 1., a, k, b, n, 1., c + i*m*n, n);
gemm_ongpu(0, 0, m, n, k, 1, a, k, b, n, 1, c, n);
}
}
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
#endif
//#ifndef CUDNN_HALF
//#endif // no CUDNN_HALF
if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == HARD_MISH) activate_array_hard_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 0);
else if (l.activation == NORM_CHAN_SOFTMAX_MAXVAL) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu, 1);
else if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
//cudaDeviceSynchronize(); // for correct profiling of performance
if (state.net.try_fix_nan) {
fix_nan_and_inf(l.output_gpu, l.outputs*l.batch);
}
if(l.assisted_excitation && state.train) assisted_excitation_forward_gpu(l, state);
if (l.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
s.input = l.output_gpu;
forward_convolutional_layer_gpu(*(l.input_layer), s);
simple_copy_ongpu(l.outputs*l.batch, l.output_gpu, l.input_antialiasing_gpu);
simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.input_layer->output_gpu, l.output_gpu);
}
if (l.coordconv) {
coord_conv_gpu(l.output_gpu, l.outputs*l.batch, l.out_w, l.out_h, l.out_c, l.batch, 0);
}
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
if (l.coordconv) {
coord_conv_gpu(l.delta_gpu, l.outputs*l.batch, l.out_w, l.out_h, l.out_c, l.batch, 1);
}
if (l.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.delta = l.delta_gpu; // s.delta will be returned to l.delta_gpu
s.input = l.input_antialiasing_gpu;
//if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.delta_gpu, l.input_layer->delta_gpu);
backward_convolutional_layer_gpu(*(l.input_layer), s);
simple_copy_ongpu(l.outputs*l.batch, l.input_antialiasing_gpu, l.output_gpu);
}
if(state.net.try_fix_nan) constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == HARD_MISH) gradient_array_hard_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == NORM_CHAN_SOFTMAX || l.activation == NORM_CHAN_SOFTMAX_MAXVAL) gradient_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.delta_gpu);
else if (l.activation == NORM_CHAN) gradient_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.delta_gpu);
else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if (!l.batch_normalize)
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
//#ifndef CUDNN_HALF
//if(l.batch_normalize){
// backward_batchnorm_layer_gpu(l, state);
//} else {
// //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
//}
//#endif // no CUDNN_HALF
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1.f;
float alpha = 1, beta = 0;
//#ifdef CUDNN_HALF
int iteration_num = get_current_iteration(state.net); //(*state.net.seen) / (state.net.batch*state.net.subdivisions);
if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || (iteration_num > 3 * state.net.burn_in) && state.net.loss_scale != 1) &&
(l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && l.groups <= 1 && l.size > 1)
{
const size_t input16_size = l.batch*l.c*l.w*l.h;
const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h;
if (*state.net.max_input16_size < input16_size) {
*state.net.max_input16_size = input16_size;
if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu);
assert(*state.net.max_input16_size > 0);
*state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size);
}
float *input16 = *state.net.input16_gpu;
if (*state.net.max_output16_size < delta16_size) {
*state.net.max_output16_size = delta16_size;
if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size);
}
float *delta16 = *state.net.output16_gpu;
assert(input16_size > 0);
assert(delta16_size > 0);
cuda_convert_f32_to_f16(state.input, input16_size, input16);
cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16);
if (l.batch_normalize) {
//if (!state.train) {
// l.mean_gpu = l.rolling_mean_gpu;
// l.variance_gpu = l.rolling_variance_gpu;
//}
float one = 1.0f;
float zero = 0.0f;
CHECK_CUDNN(cudnnBatchNormalizationBackward(cudnn_handle(),
CUDNN_BATCHNORM_SPATIAL,
&one,
&zero,
&one,
&one,
l.normDstTensorDescF16,
l.x_gpu, // input (input in BN-forward-inference)
l.normDstTensorDescF16,
delta16, // input
l.normDstTensorDescF16,
l.output_gpu, //l.x_norm_gpu, // output (new delta)
l.normTensorDesc,
l.scales_gpu, // input (should be FP32)
l.scale_updates_gpu, // output (should be FP32)
l.bias_updates_gpu, // output (should be FP32)
.00001,
l.mean_gpu, // input (should be FP32)
l.variance_gpu)); // input (should be FP32)
simple_copy_ongpu(l.outputs*l.batch / 2, l.output_gpu, delta16);
//copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1);
//cudaMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), cudaMemcpyDefault, get_cuda_stream());
}
else
{
//backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
// convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16
// get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16)
// calculate conv weight updates
// Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum
// so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m
assert((l.nweights) > 0);
cuda_convert_f32_to_f16(l.weight_updates_gpu, l.nweights, l.weight_updates_gpu16);
if (!state.net.adversarial && !l.train_only_bn) {
CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc16,
input16, //state.input,
l.ddstTensorDesc16,
delta16, //l.delta_gpu,
l.convDesc,
l.bf_algo16,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc16,
l.weight_updates_gpu16)); // l.weight_updates_gpu);
cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.nweights, l.weight_updates_gpu);
}
if (state.delta) {
if (l.binary || l.xnor) swap_binary(&l);
// http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
// calculate delta for the next layer
// convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16
// get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16)
CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(),
&alpha,
l.weightDesc16,
l.weights_gpu16, //l.weights_gpu,
l.ddstTensorDesc16,
delta16, //l.delta_gpu,
l.convDesc,
l.bd_algo16,
state.workspace,
l.workspace_size,
&beta,
l.dsrcTensorDesc16,
input16)); // state.delta);
cuda_convert_f16_to_f32(input16, input16_size, state.delta);
if (l.binary || l.xnor) swap_binary(&l);
if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
}
else {
//#else // CUDNN_HALF
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
}
if (!state.net.adversarial && !l.train_only_bn) {
float *old_input = state.input;
/*
if (l.reverse) {
if (*state.net.max_output16_size < l.inputs*l.batch) {
*state.net.max_output16_size = l.inputs*l.batch;
if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = cuda_make_array(NULL, *state.net.max_output16_size);
}
float clip = 0.0;
float divider = 1.0;
float abs_add = 1.0;
mult_inverse_array_gpu(state.input, *state.net.output16_gpu, l.inputs*l.batch, l.reverse, divider, clip, abs_add);
state.input = *state.net.output16_gpu;
}
*/
// calculate conv weight updates
// if used: beta=1 then loss decreases faster
CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu));
state.input = old_input;
}
if (state.delta) {
if (l.binary || l.xnor) swap_binary(&l);
float *old_weights = l.weights_gpu;
/*
if (l.reverse) {
if (*state.net.max_output16_size < l.nweights) {
*state.net.max_output16_size = l.nweights;
if (*state.net.output16_gpu && *state.net.max_output16_size > 0) cuda_free(*state.net.output16_gpu);
assert(*state.net.max_output16_size > 0);
*state.net.output16_gpu = cuda_make_array(NULL, l.nweights);
}
float clip = 0.0;
float divider = 1.0;
float abs_add = 1.0;
mult_inverse_array_gpu(l.weights_gpu, *state.net.output16_gpu, l.nweights, l.reverse, divider, clip, abs_add);
l.weights_gpu = *state.net.output16_gpu;
}
*/
// http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
// calculate delta for the next layer
CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta));
l.weights_gpu = old_weights;
if (l.binary || l.xnor) swap_binary(&l);
if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
}
//#endif // CUDNN_HALF
#else // CUDNN
if (l.batch_normalize) {
backward_batchnorm_layer_gpu(l, state);
}
int m = l.n / l.groups;
int n = l.size*l.size*l.c / l.groups;
int k = l.out_w*l.out_h;
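// Backward GEMM dimensions per group: weight_updates[m x n] += delta[m x k] * im2col(input)^T[k x n];
// the input gradient is weights^T[n x m] * delta[m x k], scattered back to image layout with col2im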
int i, j;
for(i = 0; i < l.batch; ++i){
for (j = 0; j < l.groups; ++j) {
float * a = l.delta_gpu + (i*l.groups + j)*m*k;
float * b = state.workspace;
float * c = l.weight_updates_gpu + j*l.nweights / l.groups;
float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w;
if (!state.net.adversarial && !l.train_only_bn) {
//im2col_ongpu(im, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
im2col_gpu_ext(im, // input
l.c / l.groups, // input channels
l.h, l.w, // input size (h, w)
l.size, l.size, // kernel size (h, w)
l.pad * l.dilation, l.pad * l.dilation, // padding (h, w)
l.stride_y, l.stride_x, // stride (h, w)
l.dilation, l.dilation, // dilation (h, w)
state.workspace); // output
//gemm_ongpu(0, 1, m, n, k, 1, a + i*m*k, k, b, k, 1, c, n);
gemm_ongpu(0, 1, m, n, k, 1, a, k, b, k, 1, c, n);
}
if (state.delta) {
if (l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu + j*l.nweights / l.groups;
float * b = l.delta_gpu + (i*l.groups + j)*m*k;
float * c = state.workspace;
//gemm_ongpu(1, 0, n, k, m, 1, a, n, b + i*k*m, k, 0, c, k);
gemm_ongpu(1, 0, n, k, m, 1, a, n, b, k, 0, c, k);
float *delta = state.delta + (i*l.groups + j)*l.c / l.groups*l.h*l.w;
//col2im_ongpu(state.workspace, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, delta);
col2im_gpu_ext(
state.workspace, // input
l.c / l.groups, // input channels
l.h, l.w, // input size (h, w)
l.size, l.size, // kernel size (h, w)
l.pad * l.dilation, l.pad * l.dilation, // padding size (h, w)
l.stride_y, l.stride_x, // stride size (h, w)
l.dilation, l.dilation, // dilation size (h, w)
delta); // output (delta)
if (l.binary || l.xnor) {
swap_binary(&l);
}
if (l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
}
#endif
if (state.net.try_fix_nan) {
if (state.delta) {
reset_nan_and_inf(state.delta, l.inputs * l.batch);
}
int size = l.nweights;
reset_nan_and_inf(l.weight_updates_gpu, size);
fix_nan_and_inf(l.weights_gpu, size);
}
}
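// Averages the activations over all channels for every spatial position of every batch item:
// dst[b][xy] = mean_c src[b][c][xy] (used by Assisted Excitation below)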
__global__ void calc_avg_activation_kernel(float *src, float *dst, int size, int channels, int batches)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int xy = i % size;
int b = i / size;
if (i < size*batches) {
dst[i] = 0;
for (int c = 0; c < channels; ++c) {
dst[i] += src[xy + size*(c + channels*b)];
}
dst[i] = dst[i] / channels;
}
}
void calc_avg_activation_gpu(float *src, float *dst, int size, int channels, int batches)
{
const int num_blocks = get_number_of_blocks(size*batches, BLOCK);
calc_avg_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (src, dst, size, channels, batches);
}
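// Adds the ground-truth-masked average activation to every channel:
// output[b][c][xy] += alpha * gt[b][xy] * a_avg[b][xy]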
__global__ void assisted_activation_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int xy = i % size;
int b = i / size;
if (b < batches) {
for (int c = 0; c < channels; ++c) {
output[xy + size*(c + channels*b)] += alpha * gt_gpu[i] * a_avg_gpu[i];
//output[xy + size*(c + channels*b)] += gt_gpu[i] * a_avg_gpu[i];
//output[xy + size*(c + channels*b)] += gt_gpu[i] * output[xy + size*(c + channels*b)];
//output[xy + size*(c + channels*b)] = a_avg_gpu[i];
}
}
}
void assisted_activation_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
const int num_blocks = get_number_of_blocks(size*batches, BLOCK);
assisted_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches);
}
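// Alternative excitation: scales activations outside the ground-truth mask by beta = (1 - alpha),
// leaving positions where gt == 1 untouched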
__global__ void assisted_activation2_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int xy = i % size;
int b = i / size;
float beta = 1 - alpha;
if (b < batches) {
for (int c = 0; c < channels; ++c) {
if(gt_gpu[i] == 0)
output[xy + size*(c + channels*b)] *= beta;
}
}
}
void assisted_activation2_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
const int num_blocks = get_number_of_blocks(size*batches, BLOCK);
assisted_activation2_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches);
}
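// Assisted Excitation: builds a per-image ground-truth mask from the truth boxes, averages the
// activations over channels, and adds alpha * gt * a_avg to every channel of the output;
// alpha follows a cosine schedule that decays toward 0 as training progresses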
void assisted_excitation_forward_gpu(convolutional_layer l, network_state state)
{
const int iteration_num = get_current_iteration(state.net); //(*state.net.seen) / (state.net.batch*state.net.subdivisions);
// epoch
//const float epoch = (float)(*state.net.seen) / state.net.train_images_num;
// calculate alpha
//const float alpha = (1 + cos(3.141592 * iteration_num)) / (2 * state.net.max_batches);
//const float alpha = (1 + cos(3.141592 * epoch)) / (2 * state.net.max_batches);
float alpha = (1 + cos(3.141592 * iteration_num / state.net.max_batches)) / 2;
//float alpha = (1 + cos(3.141592 * iteration_num / state.net.max_batches));
if (l.assisted_excitation == 1) {
if (iteration_num > state.net.max_batches / 2) return;
}
else {
if (iteration_num < state.net.burn_in) return;
else
if (iteration_num > l.assisted_excitation) return;
else
alpha = (1 + cos(3.141592 * iteration_num / (state.net.burn_in + l.assisted_excitation))) / 2; // from 1 to 0
}
//printf("\n epoch = %f, alpha = %f, seen = %d, max_batches = %d, train_images_num = %d \n",
// epoch, alpha, (*state.net.seen), state.net.max_batches, state.net.train_images_num);
//const int size = l.outputs * l.batch;
float *a_avg = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float));
float *gt = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float));
int b;
int w, h;
l.max_boxes = state.net.num_boxes;
l.truths = l.max_boxes*(4 + 1);
int num_truth = l.batch*l.truths;
float *truth_cpu = (float *)calloc(num_truth, sizeof(float));
cuda_pull_array(state.truth, truth_cpu, num_truth);
//cudaStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(cudaPeekAtLastError());
for (b = 0; b < l.batch; ++b)
{
// calculate G
int t;
for (t = 0; t < state.net.num_boxes; ++t) {
box truth = float_to_box_stride(truth_cpu + t*(4 + 1) + b*l.truths, 1);
if (!truth.x) break; // continue;
float beta = 0;
//float beta = 1 - alpha; // from 0 to 1
float dw = (1 - truth.w) * beta;
float dh = (1 - truth.h) * beta;
//printf(" alpha = %f, beta = %f, truth.w = %f, dw = %f, tw+dw = %f, l.out_w = %d \n", alpha, beta, truth.w, dw, truth.w+dw, l.out_w);
int left = floorf((truth.x - (dw + truth.w) / 2) * l.out_w);
int right = ceilf((truth.x + (dw + truth.w) / 2) * l.out_w);
int top = floorf((truth.y - (dh + truth.h) / 2) * l.out_h);
int bottom = ceilf((truth.y + (dh + truth.h) / 2) * l.out_h);
if (left < 0) left = 0;
if (top < 0) top = 0;
if (right > l.out_w) right = l.out_w;
if (bottom > l.out_h) bottom = l.out_h;
for (w = left; w < right; w++) { // exclusive bound, consistent with the h loop; avoids writing past gt when right == l.out_w
for (h = top; h < bottom; h++) {
gt[w + l.out_w * h + l.out_w*l.out_h*b] = 1;
}
}
}
}
cuda_push_array(l.gt_gpu, gt, l.out_w * l.out_h * l.batch);
//cudaStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(cudaPeekAtLastError());
// calc avg_output on GPU - for whole batch
calc_avg_activation_gpu(l.output_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);
//cudaStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(cudaPeekAtLastError());
// calc new output
//assisted_activation2_gpu(1, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch); // AE3: gt increases (beta = 1 - alpha = 0)
//assisted_activation2_gpu(alpha, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);
assisted_activation_gpu(alpha, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);
//cudaStreamSynchronize(get_cuda_stream());
//CHECK_CUDA(cudaPeekAtLastError());
/*
for (b = 0; b < l.batch; ++b)
{
// calculate average A
for (w = 0; w < l.out_w; w++) {
for (h = 0; h < l.out_h; h++) {
for (c = 0; c < l.out_c; c++) {
a_avg[w + l.out_w*(h + l.out_h*b)] += l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))];
}
a_avg[w + l.out_w*(h + l.out_h*b)] /= l.out_c; // a_avg / d
}
}
}
// change activation
for (b = 0; b < l.batch; ++b)
{
for (w = 0; w < l.out_w; w++) {
for (h = 0; h < l.out_h; h++) {
for (c = 0; c < l.out_c; c++)
{
// a = a + alpha(t) + e(c,i,j) = a + alpha(t) + g(i,j) * avg_a(i,j) / channels
l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))] +=
alpha *
g[w + l.out_w*(h + l.out_h*b)] *
a_avg[w + l.out_w*(h + l.out_h*b)];
//l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))] =
// alpha * g[w + l.out_w*(h + l.out_h*b)] * a_avg[w + l.out_w*(h + l.out_h*b)];
}
}
}
}
*/
if (0) // visualize ground truth
{
#ifdef OPENCV
cuda_pull_array(l.output_gpu, l.output, l.outputs * l.batch);
cudaStreamSynchronize(get_cuda_stream());
CHECK_CUDA(cudaPeekAtLastError());
for (b = 0; b < l.batch; ++b)
{
printf(" Assisted Excitation alpha = %f \n", alpha);
image img = float_to_image(l.out_w, l.out_h, 1, &gt[l.out_w*l.out_h*b]);
char buff[100];
sprintf(buff, "a_excitation_gt_%d", b);
show_image_cv(img, buff);
//image img2 = float_to_image(l.out_w, l.out_h, 1, &l.output[l.out_w*l.out_h*l.out_c*b]);
image img2 = float_to_image_scaled(l.out_w, l.out_h, 1, &l.output[l.out_w*l.out_h*l.out_c*b]);
char buff2[100];
sprintf(buff2, "a_excitation_output_%d", b);
show_image_cv(img2, buff2);
/*
int c = l.out_c;
if (c > 4) c = 4;
image img3 = float_to_image(l.out_w, l.out_h, c, &l.output[l.out_w*l.out_h*l.out_c*b]);
image dc = collapse_image_layers(img3, 1);
char buff3[100];
sprintf(buff3, "a_excitation_act_collapsed_%d", b);
show_image_cv(dc, buff3);
*/
wait_key_cv(5);
}
wait_until_press_key_cv();
#endif // OPENCV
}
free(truth_cpu);
free(gt);
free(a_avg);
}
void pull_convolutional_layer(convolutional_layer l)
{
cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
cuda_pull_array_async(l.biases_gpu, l.biases, l.n);
if (l.weight_updates_gpu) cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights);
if (l.bias_updates_gpu) cuda_pull_array_async(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_pull_array_async(l.scales_gpu, l.scales, l.n);
cuda_pull_array_async(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_pull_array_async(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
if (l.adam){
cuda_pull_array_async(l.m_gpu, l.m, l.nweights);
cuda_pull_array_async(l.v_gpu, l.v, l.nweights);
}
CHECK_CUDA(cudaPeekAtLastError());
cudaStreamSynchronize(get_cuda_stream());
}
void push_convolutional_layer(convolutional_layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.nweights);
#ifdef CUDNN_HALF
assert(l.nweights > 0);
cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, l.weights_gpu16);
#endif
cuda_push_array(l.biases_gpu, l.biases, l.n);
if (l.train) {
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
}
if (l.batch_normalize){
cuda_push_array(l.scales_gpu, l.scales, l.n);
cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
if (l.adam){
cuda_push_array(l.m_gpu, l.m, l.nweights);
cuda_push_array(l.v_gpu, l.v, l.nweights);
}
CHECK_CUDA(cudaPeekAtLastError());
}
void update_convolutional_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay, float loss_scale)
{
/*
for (int angle = 0; angle < 360; angle++) {
printf(" angle = %d \n", angle);
smooth_rotate_weights_kernel(l.weights_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, angle, 0);
cuda_pull_array(l.weight_deform_gpu, l.weights, l.nweights);
visualize_convolutional_layer(l, "weights", NULL);
wait_key_cv(10);
}
*/
if (l.deform) {
//for (l.angle = 0; l.angle < 360; l.angle += 1)
//{
//stretch_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, l.angle/180, 1);
//else simple_copy_ongpu(l.nweights, l.weight_updates_gpu, l.weight_deform_gpu);
if (l.rotate) rotate_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, 1);
else if (l.sway) sway_and_flip_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, l.angle, 1);
else if (l.stretch) stretch_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, 0, 1);
else if (l.stretch_sway) stretch_sway_flip_weights_gpu(l.weight_updates_gpu, l.weight_deform_gpu, l.nweights, l.n, l.size, l.angle, 1);
//simple_copy_ongpu(l.nweights, l.weight_updates_gpu, l.weight_deform_gpu);
reduce_and_expand_array_gpu(l.weight_deform_gpu, l.weight_updates_gpu, l.nweights, 4);
//printf(" angle = %f \n", l.angle);
//cuda_pull_array(l.weight_deform_gpu, l.weights, l.nweights);
//visualize_convolutional_layer(l, "weights", NULL);
//wait_key_cv(10);
//}
}
// Loss scale for Mixed-Precision on Tensor-Cores
float learning_rate = learning_rate_init*l.learning_rate_scale / loss_scale;
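// The accumulated updates were computed from a loss multiplied by loss_scale, so the learning
// rate is divided by loss_scale here (and the decay term below is multiplied by it) to compensate.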
//float momentum = a.momentum;
//float decay = a.decay;
//int batch = a.batch;
reset_nan_and_inf(l.weight_updates_gpu, l.nweights);
fix_nan_and_inf(l.weights_gpu, l.nweights);
// Gradient Centralization
if (l.grad_centr && l.batch_normalize) {
// weights[filters][channels][height][width]
// for(filters) w[f] = w[f] - mean(w[c][h][w])
gradient_centralization_gpu(l.size, l.size, l.c / l.groups, l.n, l.weight_updates_gpu);
}
if (l.adam) {
//adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t);
adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.nweights, batch, l.t);
adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t);
if (l.scales_gpu) {
adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t);
}
}
else {
//axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
//axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
//scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);
float *old_weight_updates_gpu = l.weight_updates_gpu;
if (l.reverse) {
float clip = 0.0;
float divider = 1.0;
float abs_add = 1.0;
mult_inverse_array_gpu(l.weight_updates_gpu, l.output_gpu, l.inputs*l.batch, l.reverse, divider, clip, abs_add);
l.weight_updates_gpu = l.output_gpu;
}
axpy_ongpu(l.nweights, -decay*batch*loss_scale, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
l.weight_updates_gpu = old_weight_updates_gpu;
scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);
axpy_ongpu(l.n, learning_rate / batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1);
if (l.scales_gpu) {
axpy_ongpu(l.n, learning_rate / batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1);
}
}
if (l.deform) {
//for (l.angle = 0; l.angle < 360; l.angle += 4)
//{
expand_array_gpu(l.weights_gpu, l.weight_deform_gpu, l.nweights, 4);
//simple_copy_ongpu(l.nweights, l.weight_deform_gpu, l.weights_gpu);
if (l.rotate) rotate_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, 0);
else if (l.sway) sway_and_flip_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, l.angle, 0);
else if (l.stretch) stretch_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, 0, 0);
else if (l.stretch_sway) stretch_sway_flip_weights_gpu(l.weight_deform_gpu, l.weights_gpu, l.nweights, l.n, l.size, l.angle, 0);
//printf(" angle = %f, reverse = %d \n", l.angle, 0);
//cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
//visualize_convolutional_layer(l, "weights", NULL);
//wait_key_cv(10);
//}
}
if (l.clip) {
constrain_ongpu(l.nweights, l.clip, l.weights_gpu, 1);
}
}
/*
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); // wu = wu - w*decay*batch
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1); // w = w + wu*lr/batch
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1); // wu = wu*momentum // wu = (wu - w*decay*batch)*momentum
// w = w + (wu - w*decay*batch)*lr/batch = w + wu*lr/batch - w*decay*lr = w*(1-decay*lr) + wu*lr/batch
//wu_prev = (wu_old - w_old*decay*batch)*momentum
//weights_update = weights_update_new + (weights_update_old - weights_old*decay*batch)*momentum - weights_new*decay*batch =
// = weights_update_new + weights_update_old*momentum - weights_old*decay*batch*momentum - weights_new*decay*batch
// = weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch
//------------- RESULT --------------
// weights_update = weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch
//-----------------------------------
// weights_newest = weights_new + (weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch)*lr/batch
// = weights_new + weights_update_new*lr/batch + weights_update_old*momentum*lr/batch - weights_old*momentum*decay*batch*lr/batch - weights_new*decay*batch*lr/batch
// = weights_new + weights_update_new*lr/batch + weights_update_old*momentum*lr/batch - weights_old*momentum*decay*lr - weights_new*decay*lr
// = weights_new*(1 - decay*lr) - weights_old*momentum*decay*lr + (weights_update_new + weights_update_old*momentum)*lr/batch
//------------- RESULT --------------
// weights_newest = weights_new*(1 - decay*lr) - weights_old*momentum*(decay*lr) + (weights_update_new + weights_update_old*momentum)*lr/batch =
// = weights_new - (weights_new + weights_old*momentum)*decay*lr + (weights_update_new + weights_update_old*momentum)*lr / batch
//-----------------------------------
}
}
*/
|
0285718f25b9e2bee79a821d6d9202b411c62c1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @brief Implementation file for matrix broadcasting
*
* @file matrix_broadcast.cu
* @author David Chan
* @date 2018-04-04
* Copyright (c) 2018, Regents of the University of California
*/
#include "include/util/matrix_broadcast_utils.h"
// Performs the operation matrix[i, :] = binary_op(matrix[i, :],
// alpha * vector) for each row i in the matrix
template<typename BinaryFunction, typename T>
__global__ void tsnecuda::util::BroadcastRowVector(
T * __restrict__ d_matrix,
const T * __restrict__ d_vector,
const int N,
const int M,
BinaryFunction binary_operation,
const T alpha) {
const int TID = threadIdx.x + blockIdx.x * blockDim.x;
const int i = TID % N;
const int j = TID / N;
if (j < M) d_matrix[j * N + i] = binary_operation(d_matrix[j * N + i],
alpha * d_vector[j]);
}
// Performs the operation matrix[:, j] = binary_op(matrix[:, j],
// alpha * vector) for each column j in the matrix
template<typename BinaryFunction, typename T>
__global__ void tsnecuda::util::BroadcastColumnVector(
T * __restrict__ d_matrix,
const T * __restrict__ d_vector,
const int N,
const int M,
BinaryFunction binary_operation,
const T alpha) {
const int TID = threadIdx.x + blockIdx.x * blockDim.x;
const int i = TID % N;
const int j = TID / N;
if (j < M) d_matrix[j * N + i] = binary_operation(d_matrix[j * N + i],
alpha * d_vector[i]);
}
template<typename BinaryFunction, typename T>
void tsnecuda::util::BroadcastMatrixVector(
thrust::device_vector<T> &d_matrix,
const thrust::device_vector<T> &d_vector,
const int N,
const int M,
BinaryFunction binary_operation,
const int axis,
const T alpha) {
// Checks to make sure dimensions are correct
assert(d_matrix.size() >= N * M);
assert((axis == 0 && d_vector.size() >= N) ||
(axis == 1 && d_vector.size() >= M));
const int kBlockSize = 32;
const int kNumBlocks = iDivUp(N * M, kBlockSize);
if (axis == 0) {
hipLaunchKernelGGL(( tsnecuda::util::BroadcastColumnVector), dim3(kNumBlocks), dim3(kBlockSize), 0, 0,
thrust::raw_pointer_cast(d_matrix.data()),
thrust::raw_pointer_cast(d_vector.data()),
N, M, binary_operation, alpha);
} else {
hipLaunchKernelGGL(( tsnecuda::util::BroadcastRowVector), dim3(kNumBlocks), dim3(kBlockSize), 0, 0,
thrust::raw_pointer_cast(d_matrix.data()),
thrust::raw_pointer_cast(d_vector.data()),
N, M, binary_operation, alpha);
}
}
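// Usage sketch (illustrative only -- d_points and d_row_sums are assumed names, not part of
// this file): divide every element of an N x M column-major matrix by the value of its row,
// which matches the thrust::divides<float> instantiation below:
//   tsnecuda::util::BroadcastMatrixVector(d_points, d_row_sums, N, M,
//                                         thrust::divides<float>(), 0, 1.0f);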
// Explicit instantiations of the method
template void tsnecuda::util::BroadcastMatrixVector<thrust::divides<float>, float>(
thrust::device_vector<float> &d_matrix,
const thrust::device_vector<float> &d_vector,
const int N,
const int M,
thrust::divides<float> binary_operation,
const int axis,
const float alpha);
template void tsnecuda::util::BroadcastMatrixVector<thrust::minus<float>, float>(
thrust::device_vector<float> &d_matrix,
const thrust::device_vector<float> &d_vector,
const int N,
const int M,
thrust::minus<float> binary_operation,
const int axis,
const float alpha);
| 0285718f25b9e2bee79a821d6d9202b411c62c1d.cu | /**
* @brief Implementation file for matrix broadcasting
*
* @file matrix_broadcast.cu
* @author David Chan
* @date 2018-04-04
* Copyright (c) 2018, Regents of the University of California
*/
#include "include/util/matrix_broadcast_utils.h"
// Performs the operation matrix[i, :] = binary_op(matrix[i, :],
// alpha * vector) for each row i in the matrix
template<typename BinaryFunction, typename T>
__global__ void tsnecuda::util::BroadcastRowVector(
T * __restrict__ d_matrix,
const T * __restrict__ d_vector,
const int N,
const int M,
BinaryFunction binary_operation,
const T alpha) {
const int TID = threadIdx.x + blockIdx.x * blockDim.x;
const int i = TID % N;
const int j = TID / N;
if (j < M) d_matrix[j * N + i] = binary_operation(d_matrix[j * N + i],
alpha * d_vector[j]);
}
// Performs the operation matrix[:, j] = binary_op(matrix[:, j],
// alpha * vector) for each column j in the matrix
template<typename BinaryFunction, typename T>
__global__ void tsnecuda::util::BroadcastColumnVector(
T * __restrict__ d_matrix,
const T * __restrict__ d_vector,
const int N,
const int M,
BinaryFunction binary_operation,
const T alpha) {
const int TID = threadIdx.x + blockIdx.x * blockDim.x;
const int i = TID % N;
const int j = TID / N;
if (j < M) d_matrix[j * N + i] = binary_operation(d_matrix[j * N + i],
alpha * d_vector[i]);
}
template<typename BinaryFunction, typename T>
void tsnecuda::util::BroadcastMatrixVector(
thrust::device_vector<T> &d_matrix,
const thrust::device_vector<T> &d_vector,
const int N,
const int M,
BinaryFunction binary_operation,
const int axis,
const T alpha) {
// Checks to make sure dimensions are correct
assert(d_matrix.size() >= N * M);
assert((axis == 0 && d_vector.size() >= N) ||
(axis == 1 && d_vector.size() >= M));
const int kBlockSize = 32;
const int kNumBlocks = iDivUp(N * M, kBlockSize);
if (axis == 0) {
tsnecuda::util::BroadcastColumnVector<<<kNumBlocks, kBlockSize>>>(
thrust::raw_pointer_cast(d_matrix.data()),
thrust::raw_pointer_cast(d_vector.data()),
N, M, binary_operation, alpha);
} else {
tsnecuda::util::BroadcastRowVector<<<kNumBlocks, kBlockSize>>>(
thrust::raw_pointer_cast(d_matrix.data()),
thrust::raw_pointer_cast(d_vector.data()),
N, M, binary_operation, alpha);
}
}
// Explicit instantiations of the method
template void tsnecuda::util::BroadcastMatrixVector<thrust::divides<float>, float>(
thrust::device_vector<float> &d_matrix,
const thrust::device_vector<float> &d_vector,
const int N,
const int M,
thrust::divides<float> binary_operation,
const int axis,
const float alpha);
template void tsnecuda::util::BroadcastMatrixVector<thrust::minus<float>, float>(
thrust::device_vector<float> &d_matrix,
const thrust::device_vector<float> &d_vector,
const int N,
const int M,
thrust::minus<float> binary_operation,
const int axis,
const float alpha);
|
6453d0218d29d6381b08bbdbf418f446f0be8d8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This example demonstrates how to use the Cuda OpenGL bindings with the
* runtime API.
* Device code.
*/
//
#ifndef _SIMPLEGL_KERNEL_H_
#define _SIMPLEGL_KERNEL_H_
///////////////////////////////////////////////////////////////////////////////
//! Initializes particle positions on a regular 3D lattice and zeroes their velocities
//! @param pos, vel  particle position and velocity buffers in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void setup_kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z*blockDim.z + threadIdx.z;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = (z / (float) depth); //sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
w = w*2.0f-1.0f;
pos[(x*depth +y)*width+z] = make_float4( 1+u*4,1+v*4,1+w*4, 1 );
vel[(x*depth +y)*width+z] = make_float4( 0, 0, 0, 0);
return;
}
///////////////////////////////////////////////////////////////////////////////
//! Integrates particle positions under a central attractive force toward the origin
//! @param pos, vel  particle position and velocity buffers in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time, bool init)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z*blockDim.z + threadIdx.z;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = (z / (float) depth); //sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
w = w*2.0f-1.0f;
float angle_rad = 2*3.14159*u;
float radius = v;
if( init )
{
pos[(x*depth +y)*width+z] = make_float4( 1+u*4,1+v*4,1+w*4, 1 );
vel[(x*depth +y)*width+z] = make_float4( 0, 0, 0, 0);
return;
}
else
{
float4 old_pos = pos[(x*depth +y)*width+z];
float4 old_vel = vel[(x*depth +y)*width+z];
float radius_squared = old_pos.x * old_pos.x + old_pos.y * old_pos.y + old_pos.z * old_pos.z;
/*float4 accel = make_float4(-old_pos.x / (radius_squared),
-old_pos.y / (radius_squared),
-old_pos.z / (radius_squared),
0);
*/
float4 accel = make_float4(-old_pos.x / (radius_squared*1000000),
-old_pos.y / (radius_squared*1000000),
-old_pos.z / (radius_squared*1000000),
0);
// accel.x -= v*0.0001;// (0.001)*(cos(angle_rad+0.01)-cos(angle_rad));
// accel.y += u*0.0001;//(0.001)*(sin(angle_rad+0.01)-sin(angle_rad));
//vel[y*width+x] = make_float4( (v)*cos(angle_rad+0.001)-v*cos(angle_rad), (v)*sin(angle_rad+0.001) - v*sin(angle_rad), 0, 1);
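// constant-acceleration kinematics step: v += a*dt and x += v_old*dt + 0.5*a*dt^2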
float4 new_vel = make_float4(old_vel.x + accel.x*time,
old_vel.y + accel.y*time,
old_vel.z + accel.z*time,0);
float4 new_pos = make_float4( old_pos.x + old_vel.x * time + 0.5 * accel.x*time*time,
old_pos.y + old_vel.y * time+ 0.5 * accel.y*time*time,
old_pos.z + old_vel.z * time+ 0.5 * accel.z*time*time,
1);
/* float4 new_pos = make_float4( old_pos.x + old_vel.x / 100000,
old_pos.y+ old_vel.y / 100000,
w,
1);*/
pos[(x*depth +y)*width+z] = new_pos;
vel[(x*depth +y)*width+z] = new_vel;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Integrates particle positions under a Coulomb-like attraction plus a dipole (Lorentz force) term;
//! particles that leave the region are re-injected onto the lattice
//! @param pos, vel  particle position and velocity buffers in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel_EM(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time, bool init)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z*blockDim.z + threadIdx.z;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = (z / (float) depth); //sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
w = w*2.0f-1.0f;
float angle_rad = 2*3.14159*u;
float radius = v;
if( init )
{
pos[(z*depth +y)*width+x] = make_float4( u/2,v/2,w/2, 1 );
vel[(z*depth +y)*width+x] = make_float4( 0, 0, 0, 0);
return;
}
else
{
float4 old_pos = pos[(z*depth +y)*width+x];
float4 old_vel = vel[(z*depth +y)*width+x];
/* non random way of reinjecting particles that are out of "meaningful" range
if( old_pos.x >= 5 || old_pos.y >= 5 || old_pos.z >= 5 )
{
float4 new_pos = make_float4(0.0f, 0.5f, 2.0f, 0.0f );
float4 new_vel = make_float4(0.0002f, 0.0f, 0.0002f, 0.0f);
pos[(z*depth +y)*width+x] = new_pos;
vel[(z*depth +y)*width+x] = new_vel;
}
else*/
if( old_pos.x >= 5 || old_pos.y >= 5 || old_pos.z >= 5 )
{
float4 new_pos = make_float4(u, v, w, 0.0f );
float4 new_vel = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
pos[(z*depth +y)*width+x] = new_pos;
vel[(z*depth +y)*width+x] = new_vel;
}
else
{
float radius_squared = old_pos.x * old_pos.x + old_pos.y * old_pos.y + old_pos.z * old_pos.z;
/*float4 accel = make_float4(-old_pos.x / (radius_squared),
-old_pos.y / (radius_squared),
-old_pos.z / (radius_squared),
0);
*/
// electric field
float constant = 1.0f / 1000000.0f;
float4 accel = make_float4(-old_pos.x / (radius_squared*10000000),
-old_pos.y / (radius_squared*10000000),
-old_pos.z / (radius_squared*10000000),
0);
// magnetic field
// F = [constant 1] scalar(q1 * q2 /r^2) * vector( v1 X (v2 X (pq1-pq2)))
// the equation can be rephrased as
// a = [constant] * (v1 X (v2 X (pq1-pq2)))/ r ^ 2
// with [constant] as [constant 1] * q1*q2
// and [constant 1] from the above equation is 1/ 1000000
// now the field is *generated* by a moving particle and stationary attractors
// would not generate any kind of field.
// To model the field of a planet, the closest approximation is a circular current-carrying loop;
// a complete field would be given by the integral over all such loops through
// the sphere, but in practice that integral is intractable, so the field could be modelled
// by discrete loops.
// or it can be modelled like a bar magnet through the center. This can be treated as 2 attractive fields
// 1 positive and 1 negative...
// Dipole
// for a dipole of 2 point charges of charge +Q and -Q separated by a distance d along the dipole axis (the y axis in the code below)
// the net Potential generated by these point charges is just the sum of the 2 point charges
// given V = potential, v1 = [vector from attractor +Q to particle]
// v2 = [vector from attractor -Q to particle]
// [constant 3] = magnetic field rescaling
// r1 = length(v1)
// r2 = length(v2)
// V = [constant 3] * Q / r1 - [constant 3] * Q / r1
// V = [constant 3] * Q * (r2-r1)/(r2*r1)
float4 Q1_pos = make_float4( 0.0f, 0.1f, 0.0f, 0.0f ); // say 10 units upwards
float4 Q2_pos = make_float4( 0.0f, -0.1f, 0.0f, 0.0f ); // 10 units downwards
float4 Q1_to_part = make_float4( old_pos.x - Q1_pos.x,
old_pos.y - Q1_pos.y,
old_pos.z - Q1_pos.z, 0.0f);
float4 Q2_to_part = make_float4( old_pos.x - Q2_pos.x,
old_pos.y - Q2_pos.y,
old_pos.z - Q2_pos.z, 0.0f);
// now these vectors are important, however the need to take the gradient of the potential to further
// improve this formula is inconvenient, so I am going to compute the attractors using the same formula
// as above, constant * Q / r^2 for each charge, taking into account the signs of the charge.
// how this is reinterpreted as a magnetic field will be shown below.
float r1_squared = ( Q1_to_part.x*Q1_to_part.x + Q1_to_part.y*Q1_to_part.y + Q1_to_part.z*Q1_to_part.z);
float r2_squared = ( Q2_to_part.x*Q2_to_part.x + Q2_to_part.y*Q2_to_part.y + Q2_to_part.z*Q2_to_part.z);
constant = 1.0f / 100000000000.0f; // changing the constant
// now I need the lengths of the lines between
float4 accel_pre_mag_positive = make_float4(-(Q1_to_part.x) / (r1_squared*1000),
-(Q1_to_part.y) / (r1_squared*1000),
-(Q1_to_part.z) / (r1_squared*1000),
0);
float4 accel_pre_mag_negative = make_float4((Q2_to_part.x) / (r2_squared*1000),
(Q2_to_part.y) / (r2_squared*1000),
(Q2_to_part.z) / (r2_squared*1000),
0);
// summing the 2 fields
float4 accel_pre_mag = make_float4( accel_pre_mag_positive.x + accel_pre_mag_negative.x,
accel_pre_mag_positive.y + accel_pre_mag_negative.y,
accel_pre_mag_positive.z + accel_pre_mag_negative.z, 0.0f);
// now that is the field force calculated for the dipole. To mix the magnetic force with the electric
// force, we need to use the *Lorentz Force Law* equation
// F = qE + q(v X B)
// now assuming B[scaled] is given by accel_pre_mag, we need
// current_veloctiy [cross] B
// using the cross product notation,
// |i j k
// |a1 a2 a3
// |b1 b2 b3
//
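// expanding the determinant with a = old_vel and b = accel_pre_mag gives
// i = a2*b3 - a3*b2,  j = a3*b1 - a1*b3 (note the sign flip),  k = a1*b2 - a2*b1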
float i1 = old_vel.y * accel_pre_mag.z - old_vel.z * accel_pre_mag.y;
float j1 = old_vel.z * accel_pre_mag.x - old_vel.x * accel_pre_mag.z; // j component carries a minus sign in the determinant expansion
float k1 = old_vel.x * accel_pre_mag.y - old_vel.y * accel_pre_mag.x;
//float4 accel = make_float4(accel_pre_mag.x,accel_pre_mag.y,accel_pre_mag.z,1.0f);
// now add this to accel to complete the lorentz force...
accel.x += i1;
accel.y += j1;
accel.z += k1;
//accel.x += 0.01*accel_pre_mag.x;
//accel.y += 0.01*accel_pre_mag.y;
//accel.z += 0.01*accel_pre_mag.z;
float4 new_vel = make_float4(old_vel.x + accel.x*time,
old_vel.y + accel.y*time,
old_vel.z + accel.z*time,0);
float4 new_pos = make_float4( old_pos.x + old_vel.x * time + 0.5 * accel.x*time*time,
old_pos.y + old_vel.y * time+ 0.5 * accel.y*time*time,
old_pos.z + old_vel.z * time+ 0.5 * accel.z*time*time,
1);
/* float4 new_pos = make_float4( old_pos.x + old_vel.x / 100000,
old_pos.y+ old_vel.y / 100000,
w,
1);*/
pos[(z*depth +y)*width+x] = new_pos;
vel[(z*depth +y)*width+x] = new_vel;
}
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(float4* pos, float4* vel, unsigned int mesh_width, unsigned int mesh_height, unsigned int depth,float time, bool init)
{
// execute the kernel
dim3 block(8, 8, 8);
dim3 grid(mesh_width / block.x, mesh_height / block.y, depth/ block.z);
//kernel<<< grid, block>>>(pos, vel,mesh_width, mesh_height,depth, time, init);
hipLaunchKernelGGL(( kernel_EM), dim3(grid), dim3(block), 0, 0, pos, vel,mesh_width, mesh_height,depth, time, init);
}
extern "C" void call_setup_kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time)
{
// execute the kernel
dim3 block(8, 8, 8);
dim3 grid(width / block.x, height / block.y, depth/ block.z);
//kernel<<< grid, block>>>(pos, vel,mesh_width, mesh_height,depth, time, init);
hipLaunchKernelGGL(( setup_kernel), dim3(grid), dim3(block), 0, 0, pos, vel,width, height,depth, time);
}
#endif // #ifndef _SIMPLEGL_KERNEL_H_
| 6453d0218d29d6381b08bbdbf418f446f0be8d8f.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This example demonstrates how to use the Cuda OpenGL bindings with the
* runtime API.
* Device code.
*/
//
#ifndef _SIMPLEGL_KERNEL_H_
#define _SIMPLEGL_KERNEL_H_
///////////////////////////////////////////////////////////////////////////////
//! Initializes particle positions on a regular 3D lattice and zeroes their velocities
//! @param pos, vel  particle position and velocity buffers in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void setup_kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z*blockDim.z + threadIdx.z;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = (z / (float) depth); //sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
w = w*2.0f-1.0f;
pos[(x*depth +y)*width+z] = make_float4( 1+u*4,1+v*4,1+w*4, 1 );
vel[(x*depth +y)*width+z] = make_float4( 0, 0, 0, 0);
return;
}
///////////////////////////////////////////////////////////////////////////////
//! Integrates particle positions under a central attractive force toward the origin
//! @param pos, vel  particle position and velocity buffers in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time, bool init)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z*blockDim.z + threadIdx.z;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = (z / (float) depth); //sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
w = w*2.0f-1.0f;
float angle_rad = 2*3.14159*u;
float radius = v;
if( init )
{
pos[(x*depth +y)*width+z] = make_float4( 1+u*4,1+v*4,1+w*4, 1 );
vel[(x*depth +y)*width+z] = make_float4( 0, 0, 0, 0);
return;
}
else
{
float4 old_pos = pos[(x*depth +y)*width+z];
float4 old_vel = vel[(x*depth +y)*width+z];
float radius_squared = old_pos.x * old_pos.x + old_pos.y * old_pos.y + old_pos.z * old_pos.z;
/*float4 accel = make_float4(-old_pos.x / (radius_squared),
-old_pos.y / (radius_squared),
-old_pos.z / (radius_squared),
0);
*/
float4 accel = make_float4(-old_pos.x / (radius_squared*1000000),
-old_pos.y / (radius_squared*1000000),
-old_pos.z / (radius_squared*1000000),
0);
// accel.x -= v*0.0001;// (0.001)*(cos(angle_rad+0.01)-cos(angle_rad));
// accel.y += u*0.0001;//(0.001)*(sin(angle_rad+0.01)-sin(angle_rad));
//vel[y*width+x] = make_float4( (v)*cos(angle_rad+0.001)-v*cos(angle_rad), (v)*sin(angle_rad+0.001) - v*sin(angle_rad), 0, 1);
float4 new_vel = make_float4(old_vel.x + accel.x*time,
old_vel.y + accel.y*time,
old_vel.z + accel.z*time,0);
float4 new_pos = make_float4( old_pos.x + old_vel.x * time + 0.5 * accel.x*time*time,
old_pos.y + old_vel.y * time+ 0.5 * accel.y*time*time,
old_pos.z + old_vel.z * time+ 0.5 * accel.z*time*time,
1);
/* float4 new_pos = make_float4( old_pos.x + old_vel.x / 100000,
old_pos.y+ old_vel.y / 100000,
w,
1);*/
pos[(x*depth +y)*width+z] = new_pos;
vel[(x*depth +y)*width+z] = new_vel;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel_EM(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time, bool init)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z*blockDim.z + threadIdx.z;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = (z / (float) depth); //sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
w = w*2.0f-1.0f;
float angle_rad = 2*3.14159*u;
float radius = v;
if( init )
{
pos[(z*depth +y)*width+x] = make_float4( u/2,v/2,w/2, 1 );
vel[(z*depth +y)*width+x] = make_float4( 0, 0, 0, 0);
return;
}
else
{
float4 old_pos = pos[(z*depth +y)*width+x];
float4 old_vel = vel[(z*depth +y)*width+x];
/* non random way of reinjecting particles that are out of "meaningful" range
if( old_pos.x >= 5 || old_pos.y >= 5 || old_pos.z >= 5 )
{
float4 new_pos = make_float4(0.0f, 0.5f, 2.0f, 0.0f );
float4 new_vel = make_float4(0.0002f, 0.0f, 0.0002f, 0.0f);
pos[(z*depth +y)*width+x] = new_pos;
vel[(z*depth +y)*width+x] = new_vel;
}
else*/
if( old_pos.x >= 5 || old_pos.y >= 5 || old_pos.z >= 5 )
{
float4 new_pos = make_float4(u, v, w, 0.0f );
float4 new_vel = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
pos[(z*depth +y)*width+x] = new_pos;
vel[(z*depth +y)*width+x] = new_vel;
}
else
{
float radius_squared = old_pos.x * old_pos.x + old_pos.y * old_pos.y + old_pos.z * old_pos.z;
/*float4 accel = make_float4(-old_pos.x / (radius_squared),
-old_pos.y / (radius_squared),
-old_pos.z / (radius_squared),
0);
*/
// electric field
float constant = 1.0f/1000000.0f;
float4 accel = make_float4(-old_pos.x / (radius_squared*10000000),
-old_pos.y / (radius_squared*10000000),
-old_pos.z / (radius_squared*10000000),
0);
// magnetic field
// F = [constant 1] scalar(q1 * q2 /r^2) * vector( v1 X (v2 X (pq1-pq2)))
// the equation can be rephrased as
// a = [constant] * (v1 X (v2 X (pq1-pq2)))/ r ^ 2
// with [constant] as [constant 1] * q1*q2
// and [constant 1] from the above equation is 1/ 1000000
// now the field is *generated* by a moving particle and stationary attractors
// would not generate any kind of field.
// To model the field of a planet we can best approximate by a circular current carrying loop,
// it should be obvious that a complete field should be given by the integral of all loops through
// the sphere, however in practice such an integral is probably impossible, the field could be modelled
// by discrete loops.
// or it can be modelled like a bar magnet through the center. This can be treated as 2 attractive fields
// 1 positive and 1 negative...
// Dipole
// for a dipole of 2 point charges of charge +Q and -Q separated by a distance d on the z axis
// the net Potential generated by these point charges is just the sum of the 2 point charges
// given V = potential, v1 = [vector from attractor +Q to particle]
// v2 = [vector from attractor -Q to particle]
// [constant 3] = magnetic field rescaling
// r1 = length(v1)
// r2 = length(v2)
// V = [constant 3] * Q / r1 - [constant 3] * Q / r2
// V = [constant 3] * Q * (r2-r1)/(r2*r1)
float4 Q1_pos = make_float4( 0.0f, 0.1f, 0.0f, 0.0f ); // say 10 units upwards
float4 Q2_pos = make_float4( 0.0f, -0.1f, 0.0f, 0.0f ); // 10 units downwards
float4 Q1_to_part = make_float4( old_pos.x - Q1_pos.x,
old_pos.y - Q1_pos.y,
old_pos.z - Q1_pos.z, 0.0f);
float4 Q2_to_part = make_float4( old_pos.x - Q2_pos.x,
old_pos.y - Q2_pos.y,
old_pos.z - Q2_pos.z, 0.0f);
// now these vectors are important, however the need to take the gradient of the potential to further
// improve this formula is inconvenient, so I am going to compute the attractors using the same formula
// as above, constant * Q / r^2 for each charge, taking into account the signs of the charge.
// how this is reinterpreted as a magnetic field will be shown below.
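// For reference, what the code below actually computes (up to the hard-coded 1/1000 scale) is
// a_plus = -(p - Q1_pos)/|p - Q1_pos|^2 and a_minus = +(p - Q2_pos)/|p - Q2_pos|^2, where p is
// the particle position; their sum accel_pre_mag is then treated as the (rescaled) B field.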
float r1_squared = ( Q1_to_part.x*Q1_to_part.x + Q1_to_part.y*Q1_to_part.y + Q1_to_part.z*Q1_to_part.z);
float r2_squared = ( Q2_to_part.x*Q2_to_part.x + Q2_to_part.y*Q2_to_part.y + Q2_to_part.z*Q2_to_part.z);
constant = 1.0f/100000000000.0f; // changing the constant
// now I need the lengths of the lines between
float4 accel_pre_mag_positive = make_float4(-(Q1_to_part.x) / (r1_squared*1000),
-(Q1_to_part.y) / (r1_squared*1000),
-(Q1_to_part.z) / (r1_squared*1000),
0);
float4 accel_pre_mag_negative = make_float4((Q2_to_part.x) / (r2_squared*1000),
(Q2_to_part.y) / (r2_squared*1000),
(Q2_to_part.z) / (r2_squared*1000),
0);
// summing the 2 fields
float4 accel_pre_mag = make_float4( accel_pre_mag_positive.x + accel_pre_mag_negative.x,
accel_pre_mag_positive.y + accel_pre_mag_negative.y,
accel_pre_mag_positive.z + accel_pre_mag_negative.z, 0.0f);
// now that is the field force calculated for the dipole. To mix the magnetic force with the electric
// force, we need to use the *Lorentz Force Law* equation
// F = qE + q(v X B)
// now assuming B[scaled] is given by accel_pre_mag, we need
// current_velocity [cross] B
// using the cross product notation,
// |i j k
// |a1 a2 a3
// |b1 b2 b3
//
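// Standard expansion for reference, with v = old_vel and B = accel_pre_mag:
// (v x B).x = v.y*B.z - v.z*B.y (matches i1 below)
// (v x B).y = v.z*B.x - v.x*B.z (note: j1 below is written with the opposite sign)
// (v x B).z = v.x*B.y - v.y*B.x (matches k1 below)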
float i1 = old_vel.y * accel_pre_mag.z - old_vel.z * accel_pre_mag.y;
float j1 = old_vel.x * accel_pre_mag.z - old_vel.z * accel_pre_mag.x;
float k1 = old_vel.x * accel_pre_mag.y - old_vel.y * accel_pre_mag.x;
//float4 accel = make_float4(accel_pre_mag.x,accel_pre_mag.y,accel_pre_mag.z,1.0f);
// now add this to accel to complete the lorentz force...
accel.x += i1;
accel.y += j1;
accel.z += k1;
//accel.x += 0.01*accel_pre_mag.x;
//accel.y += 0.01*accel_pre_mag.y;
//accel.z += 0.01*accel_pre_mag.z;
float4 new_vel = make_float4(old_vel.x + accel.x*time,
old_vel.y + accel.y*time,
old_vel.z + accel.z*time,0);
float4 new_pos = make_float4( old_pos.x + old_vel.x * time + 0.5 * accel.x*time*time,
old_pos.y + old_vel.y * time+ 0.5 * accel.y*time*time,
old_pos.z + old_vel.z * time+ 0.5 * accel.z*time*time,
1);
/* float4 new_pos = make_float4( old_pos.x + old_vel.x / 100000,
old_pos.y+ old_vel.y / 100000,
w,
1);*/
pos[(z*depth +y)*width+x] = new_pos;
vel[(z*depth +y)*width+x] = new_vel;
}
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(float4* pos, float4* vel, unsigned int mesh_width, unsigned int mesh_height, unsigned int depth,float time, bool init)
{
// execute the kernel
dim3 block(8, 8, 8);
dim3 grid(mesh_width / block.x, mesh_height / block.y, depth/ block.z);
//kernel<<< grid, block>>>(pos, vel,mesh_width, mesh_height,depth, time, init);
kernel_EM<<< grid, block>>>(pos, vel,mesh_width, mesh_height,depth, time, init);
}
extern "C" void call_setup_kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, unsigned int depth,float time)
{
// execute the kernel
dim3 block(8, 8, 8);
dim3 grid(width / block.x, height / block.y, depth/ block.z);
//kernel<<< grid, block>>>(pos, vel,mesh_width, mesh_height,depth, time, init);
setup_kernel<<< grid, block>>>(pos, vel,width, height,depth, time);
}
#endif // #ifndef _SIMPLEGL_KERNEL_H_
|
09f7ca5135d29f572aa1a51970c1f9ee4cbf6e46.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
__global__ void f(int *z)
{
}
int main(void)
{
int z;
int *dev_z;
hipMalloc((void**)&dev_z, sizeof(int));
hipLaunchKernelGGL(( f), dim3(1),dim3(1), 0, 0, dev_z);
hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_z);
return 0;
}
| 09f7ca5135d29f572aa1a51970c1f9ee4cbf6e46.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
__global__ void f(int *z)
{
}
int main(void)
{
int z;
int *dev_z;
cudaMalloc((void**)&dev_z, sizeof(int));
f<<<1,1>>>(dev_z);
cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_z);
return 0;
}
|
c5db6bc3fb33ce2ccfc7a5d8f743eaf962e3bd6a.hip | // !!! This is a file automatically generated by hipify!!!
//#include"../crab_cavity.h"
#include"crab_cavity.h"
//#include"../linear_map.h"
#include"linear_map.h"
//#include"../Lorentz_boost.h"
#include"Lorentz_boost.h"
#include"../beam.h"
#include"beam.h"
#include"gtrack.h"
#include<math.h>
#include<thrust/device_vector.h>
#include<thrust/copy.h>
#include<thrust/reduce.h>
#include<thrust/for_each.h>
#include<thrust/iterator/zip_iterator.h>
#include<thrust/tuple.h>
#include<iostream>
#include<sys/time.h>
using std::cout;using std::endl;
double now(){
struct timeval tp;
gettimeofday(&tp,NULL);
return (double)tp.tv_sec+(double)tp.tv_usec*1e-6;
}
int test(double *x, double *px, double *y, double *py, double *z, double *pz, size_t N){
thrust::device_vector<double> vx(N), vpx(N), vy(N), vpy(N), vz(N), vpz(N);
thrust::copy(x,x+N,vx.begin());
thrust::copy(px,px+N,vpx.begin());
thrust::copy(y,y+N,vy.begin());
thrust::copy(py,py+N,vpy.begin());
thrust::copy(z,z+N,vz.begin());
thrust::copy(pz,pz+N,vpz.begin());
auto first=thrust::make_zip_iterator(thrust::make_tuple(vx.begin(),vpx.begin(),vy.begin(),vpy.begin(),vz.begin(),vpz.begin()));
auto last=thrust::make_zip_iterator(thrust::make_tuple(vx.end(),vpx.end(),vy.end(),vpy.end(),vz.end(),vpz.end()));
gtrack_functor gfun;
double strength=tan(12.5e-3)/sqrt(0.9*1300);
ThinCrabCavity tcc(strength,200e6);
gfun.thin_crab_cavity_before_IP=gThinCrabCavity(tcc);
gfun.thin_crab_cavity_after_IP=gThinCrabCavity(tcc);
LinearX MX1(0.9,0.0,1300.0,0.0,-M_PI/2.0);
LinearX MX2(1300.0,0.0,0.9,0.0, M_PI/2.0);
LinearX MX3(0.9,0.0,1300.0,0.0, M_PI/2.0);
LinearX MX4(1300.0,0.0,0.9,0.0,-M_PI/2.0);
gfun.MX1=gLinearX(MX1);gfun.MX2=gLinearX(MX2);
gfun.MX3=gLinearX(MX3);gfun.MX4=gLinearX(MX4);
LorentzBoost lb(12.5e-3);
gfun.lb=gLorentzBoost(lb);
RevLorentzBoost rlb(12.5e-3);
gfun.rlb=gRevLorentzBoost(rlb);
Linear6D ot({0.9,0.18,0.07/6.6e-4},{0.0,0.0},{0.228,0.210,0.01},{2.0,2.0});
gfun.oneturn=gLinear6D(ot);
ThinStrongBeam tsb(-9e-10,{0.7,0.07},{0.0,0.0},{120e-6,20e-6});
GaussianStrongBeam gsb(4,tsb);
gsb.set_equal_area(0.02);
gfun.gsb=gGaussianStrongBeam(gsb);
// test speed
cout.precision(16);
cout.flags(std::ios::scientific);
double t0,t1;
t0=now();
/*
for(size_t j=0;j<1000;++j){
thrust::for_each(first,last,gfun);
hipDeviceSynchronize();
}
*/
thrust::for_each(first,last,gfun);
hipDeviceSynchronize();
t1=now();
cout<<"gpu: "<<t1-t0<<" seconds."<<endl;
t0=now();
for(size_t j=0;j<1000;++j){
for(size_t i=0;i<N;++i){
tcc.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX1.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX2.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
lb.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
gsb.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
rlb.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX3.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX4.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
tcc.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
ot.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
}
}
t1=now();
cout<<"cpu: "<<t1-t0<<" seconds."<<endl;
// test correctness
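// Note: assuming gtrack_functor applies one full turn per call, the device path above ran a
// single turn (its 1000-turn loop is commented out) while the host loop ran 1000 turns, so the
// two sets of sums printed below only agree when the iteration counts are made identical.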
double sumx=thrust::reduce(vx.begin(),vx.end());
double sumpx=thrust::reduce(vpx.begin(),vpx.end());
double sumy=thrust::reduce(vy.begin(),vy.end());
double sumpy=thrust::reduce(vpy.begin(),vpy.end());
double sumz=thrust::reduce(vz.begin(),vz.end());
double sumpz=thrust::reduce(vpz.begin(),vpz.end());
cout.precision(16);
cout.flags(std::ios::scientific);
cout<<sumx<<"\t"<<sumpx<<"\t"<<sumy<<"\t"<<sumpy<<"\t"<<sumz<<"\t"<<sumpz<<endl;
sumx=0.0;sumpx=0.0;
sumy=0.0;sumpy=0.0;
sumz=0.0;sumpz=0.0;
for(size_t i=0;i<N;++i){
sumx+=x[i];sumpx+=px[i];
sumy+=y[i];sumpy+=py[i];
sumz+=z[i];sumpz+=pz[i];
}
cout<<sumx<<"\t"<<sumpx<<"\t"<<sumy<<"\t"<<sumpy<<"\t"<<sumz<<"\t"<<sumpz<<endl;
return 0;
}
| c5db6bc3fb33ce2ccfc7a5d8f743eaf962e3bd6a.cu | //#include"../crab_cavity.h"
#include"crab_cavity.h"
//#include"../linear_map.h"
#include"linear_map.h"
//#include"../Lorentz_boost.h"
#include"Lorentz_boost.h"
#include"../beam.h"
#include"beam.h"
#include"gtrack.h"
#include<math.h>
#include<thrust/device_vector.h>
#include<thrust/copy.h>
#include<thrust/reduce.h>
#include<thrust/for_each.h>
#include<thrust/iterator/zip_iterator.h>
#include<thrust/tuple.h>
#include<iostream>
#include<sys/time.h>
using std::cout;using std::endl;
double now(){
struct timeval tp;
gettimeofday(&tp,NULL);
return (double)tp.tv_sec+(double)tp.tv_usec*1e-6;
}
int test(double *x, double *px, double *y, double *py, double *z, double *pz, size_t N){
thrust::device_vector<double> vx(N), vpx(N), vy(N), vpy(N), vz(N), vpz(N);
thrust::copy(x,x+N,vx.begin());
thrust::copy(px,px+N,vpx.begin());
thrust::copy(y,y+N,vy.begin());
thrust::copy(py,py+N,vpy.begin());
thrust::copy(z,z+N,vz.begin());
thrust::copy(pz,pz+N,vpz.begin());
auto first=thrust::make_zip_iterator(thrust::make_tuple(vx.begin(),vpx.begin(),vy.begin(),vpy.begin(),vz.begin(),vpz.begin()));
auto last=thrust::make_zip_iterator(thrust::make_tuple(vx.end(),vpx.end(),vy.end(),vpy.end(),vz.end(),vpz.end()));
gtrack_functor gfun;
double strength=tan(12.5e-3)/sqrt(0.9*1300);
ThinCrabCavity tcc(strength,200e6);
gfun.thin_crab_cavity_before_IP=gThinCrabCavity(tcc);
gfun.thin_crab_cavity_after_IP=gThinCrabCavity(tcc);
LinearX MX1(0.9,0.0,1300.0,0.0,-M_PI/2.0);
LinearX MX2(1300.0,0.0,0.9,0.0, M_PI/2.0);
LinearX MX3(0.9,0.0,1300.0,0.0, M_PI/2.0);
LinearX MX4(1300.0,0.0,0.9,0.0,-M_PI/2.0);
gfun.MX1=gLinearX(MX1);gfun.MX2=gLinearX(MX2);
gfun.MX3=gLinearX(MX3);gfun.MX4=gLinearX(MX4);
LorentzBoost lb(12.5e-3);
gfun.lb=gLorentzBoost(lb);
RevLorentzBoost rlb(12.5e-3);
gfun.rlb=gRevLorentzBoost(rlb);
Linear6D ot({0.9,0.18,0.07/6.6e-4},{0.0,0.0},{0.228,0.210,0.01},{2.0,2.0});
gfun.oneturn=gLinear6D(ot);
ThinStrongBeam tsb(-9e-10,{0.7,0.07},{0.0,0.0},{120e-6,20e-6});
GaussianStrongBeam gsb(4,tsb);
gsb.set_equal_area(0.02);
gfun.gsb=gGaussianStrongBeam(gsb);
// test speed
cout.precision(16);
cout.flags(std::ios::scientific);
double t0,t1;
t0=now();
/*
for(size_t j=0;j<1000;++j){
thrust::for_each(first,last,gfun);
cudaDeviceSynchronize();
}
*/
thrust::for_each(first,last,gfun);
cudaDeviceSynchronize();
t1=now();
cout<<"gpu: "<<t1-t0<<" seconds."<<endl;
t0=now();
for(size_t j=0;j<1000;++j){
for(size_t i=0;i<N;++i){
tcc.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX1.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX2.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
lb.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
gsb.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
rlb.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX3.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
MX4.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
tcc.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
ot.Pass(x[i],px[i],y[i],py[i],z[i],pz[i]);
}
}
t1=now();
cout<<"cpu: "<<t1-t0<<" seconds."<<endl;
// test correctness
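// Note: assuming gtrack_functor applies one full turn per call, the device path above ran a
// single turn (its 1000-turn loop is commented out) while the host loop ran 1000 turns, so the
// two sets of sums printed below only agree when the iteration counts are made identical.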
double sumx=thrust::reduce(vx.begin(),vx.end());
double sumpx=thrust::reduce(vpx.begin(),vpx.end());
double sumy=thrust::reduce(vy.begin(),vy.end());
double sumpy=thrust::reduce(vpy.begin(),vpy.end());
double sumz=thrust::reduce(vz.begin(),vz.end());
double sumpz=thrust::reduce(vpz.begin(),vpz.end());
cout.precision(16);
cout.flags(std::ios::scientific);
cout<<sumx<<"\t"<<sumpx<<"\t"<<sumy<<"\t"<<sumpy<<"\t"<<sumz<<"\t"<<sumpz<<endl;
sumx=0.0;sumpx=0.0;
sumy=0.0;sumpy=0.0;
sumz=0.0;sumpz=0.0;
for(size_t i=0;i<N;++i){
sumx+=x[i];sumpx+=px[i];
sumy+=y[i];sumpy+=py[i];
sumz+=z[i];sumpz+=pz[i];
}
cout<<sumx<<"\t"<<sumpx<<"\t"<<sumy<<"\t"<<sumpy<<"\t"<<sumz<<"\t"<<sumpz<<endl;
return 0;
}
|
443c589de6a6b8a52041e62210aad50483b2da92.hip | // !!! This is a file automatically generated by hipify!!!
/* Molecular dynamics simulation linear code for binary Lennard-Jones liquid
under NVE ensemble; Author: You-Liang Zhu, Email: [email protected]
Copyright: You-Liang Zhu
This code is free: you can redistribute it and/or modify it under the terms
of the GNU General Public License.*/
#include <ctype.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// check CUDA error
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(-1);
}
}
// parallel reduction
extern __shared__ int sdata[];
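// Stage 1: each block loads one element per thread into shared memory, then tree-reduces by
// halving the active stride each step; thread 0 writes the block's partial sum to
// d_scratch[blockIdx.x].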
__global__ void compute_sums_kernel(unsigned int N, int *d_a, int *d_scratch) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int data = 0;
if (i < N)
data = d_a[i];
sdata[threadIdx.x] = data;
__syncthreads();
int offs = blockDim.x >> 1;
while (offs > 0) {
if (threadIdx.x < offs)
sdata[threadIdx.x] += sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
d_scratch[blockIdx.x] = sdata[0];
}
// final parallel reduction
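// Stage 2: launched with a single block; the partial sums are consumed in chunks of blockDim.x,
// each chunk is tree-reduced in shared memory, and thread 0 accumulates the per-chunk results
// into final_sum before writing the grand total back to d_scratch[0].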
__global__ void compute_final_kernel(int *d_scratch,
unsigned int num_partial_sums) {
int final_sum = 0;
for (int start = 0; start < num_partial_sums; start += blockDim.x) {
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
sdata[threadIdx.x] = d_scratch[start + threadIdx.x];
else
sdata[threadIdx.x] = 0;
__syncthreads();
int offs = blockDim.x >> 1;
while (offs > 0) {
if (threadIdx.x < offs) {
sdata[threadIdx.x] += sdata[threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
final_sum += sdata[0];
}
if (threadIdx.x == 0)
d_scratch[0] = final_sum;
}
int main(int argc, char **argv) {
int N = 10000; // the number of integers for reduction
int *h_a; // pointer for host memory
int *d_a, *d_scratch; // pointer for device memory
hipSetDevice(0); // set GPU ID for computation
int block_size = 64; // define block size
int n_blocks = (int)ceil((float)N / (float)block_size); // define grid size
// Part 1 of 5: allocate host and device memory
size_t memSize = N * sizeof(int);
h_a = (int *)malloc(memSize);
hipMalloc((void **)&d_a, memSize);
hipMalloc((void **)&d_scratch, n_blocks * sizeof(int));
// Part 2 of 5: initiate array
int sum = 0;
for (unsigned int i = 0; i < N; i++) {
int ran = rand();
h_a[i] = ran;
sum += ran;
}
// Part 3 of 5: copy data from host to device memory
hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy");
// Part 4 of 5: launch kernel
dim3 grid(n_blocks, 1, 1);
dim3 threads(block_size, 1, 1);
unsigned int shared_bytes = sizeof(int) * block_size;
hipLaunchKernelGGL(( compute_sums_kernel), dim3(grid), dim3(threads), shared_bytes, 0, N, d_a, d_scratch);
// block until the device has completed
/* hipDeviceSynchronize(); */
// check if kernel execution generated an error
checkCUDAError("kernel execution 1");
int final_block_size = 512;
grid = dim3(1, 1, 1);
threads = dim3(final_block_size, 1, 1);
shared_bytes = sizeof(int) * final_block_size;
hipLaunchKernelGGL(( compute_final_kernel), dim3(grid), dim3(threads), shared_bytes, 0, d_scratch, n_blocks);
// block until the device has completed
/* hipDeviceSynchronize(); */
// check if kernel execution generated an error
checkCUDAError("kernel execution 2");
// Part 5 of 5: device to host copy
hipMemcpy(h_a, d_scratch, sizeof(int), hipMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("hipMemcpy");
// check result from device
if (h_a[0] - sum != 0) {
fprintf(stderr, "Failed!!! %d %d\n", h_a[0], sum);
exit(-1);
}
// free device memory
hipFree(d_a);
hipFree(d_scratch);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Success!\n");
return 0;
}
| 443c589de6a6b8a52041e62210aad50483b2da92.cu | /* Molecular dynamics simulation linear code for binary Lennard-Jones liquid
under NVE ensemble; Author: You-Liang Zhu, Email: [email protected]
Copyright: You-Liang Zhu
This code is free: you can redistribute it and/or modify it under the terms
of the GNU General Public License.*/
#include <ctype.h>
#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// check CUDA error
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(-1);
}
}
// parallel reduction
extern __shared__ int sdata[];
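// Stage 1: each block loads one element per thread into shared memory, then tree-reduces by
// halving the active stride each step; thread 0 writes the block's partial sum to
// d_scratch[blockIdx.x].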
__global__ void compute_sums_kernel(unsigned int N, int *d_a, int *d_scratch) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int data = 0;
if (i < N)
data = d_a[i];
sdata[threadIdx.x] = data;
__syncthreads();
int offs = blockDim.x >> 1;
while (offs > 0) {
if (threadIdx.x < offs)
sdata[threadIdx.x] += sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
d_scratch[blockIdx.x] = sdata[0];
}
// final parallel reduction
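// Stage 2: launched with a single block; the partial sums are consumed in chunks of blockDim.x,
// each chunk is tree-reduced in shared memory, and thread 0 accumulates the per-chunk results
// into final_sum before writing the grand total back to d_scratch[0].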
__global__ void compute_final_kernel(int *d_scratch,
unsigned int num_partial_sums) {
int final_sum = 0;
for (int start = 0; start < num_partial_sums; start += blockDim.x) {
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
sdata[threadIdx.x] = d_scratch[start + threadIdx.x];
else
sdata[threadIdx.x] = 0;
__syncthreads();
int offs = blockDim.x >> 1;
while (offs > 0) {
if (threadIdx.x < offs) {
sdata[threadIdx.x] += sdata[threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
final_sum += sdata[0];
}
if (threadIdx.x == 0)
d_scratch[0] = final_sum;
}
int main(int argc, char **argv) {
int N = 10000; // the number of integers for reduction
int *h_a; // pointer for host memory
int *d_a, *d_scratch; // pointer for device memory
cudaSetDevice(0); // set GPU ID for computation
int block_size = 64; // define block size
int n_blocks = (int)ceil((float)N / (float)block_size); // define grid size
// Part 1 of 5: allocate host and device memory
size_t memSize = N * sizeof(int);
h_a = (int *)malloc(memSize);
cudaMalloc((void **)&d_a, memSize);
cudaMalloc((void **)&d_scratch, n_blocks * sizeof(int));
// Part 2 of 5: initiate array
int sum = 0;
for (unsigned int i = 0; i < N; i++) {
int ran = rand();
h_a[i] = ran;
sum += ran;
}
// Part 3 of 5: copy data from host to device memory
cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy");
// Part 4 of 5: launch kernel
dim3 grid(n_blocks, 1, 1);
dim3 threads(block_size, 1, 1);
unsigned int shared_bytes = sizeof(int) * block_size;
compute_sums_kernel<<<grid, threads, shared_bytes>>>(N, d_a, d_scratch);
// block until the device has completed
/* cudaThreadSynchronize(); */
// check if kernel execution generated an error
checkCUDAError("kernel execution 1");
int final_block_size = 512;
grid = dim3(1, 1, 1);
threads = dim3(final_block_size, 1, 1);
shared_bytes = sizeof(int) * final_block_size;
compute_final_kernel<<<grid, threads, shared_bytes>>>(d_scratch, n_blocks);
// block until the device has completed
/* cudaThreadSynchronize(); */
// check if kernel execution generated an error
checkCUDAError("kernel execution 2");
// Part 5 of 5: device to host copy
cudaMemcpy(h_a, d_scratch, sizeof(int), cudaMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("cudaMemcpy");
// check result from device
if (h_a[0] - sum != 0) {
fprintf(stderr, "Failed!!! %d %d\n", h_a[0], sum);
exit(-1);
}
// free device memory
cudaFree(d_a);
cudaFree(d_scratch);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Success!\n");
return 0;
}
|
73c937b723e610d6a6ce50eac77d4a9044cb61d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./svd.cuh"
#include <rocblas.h>
#include <cusolverDn.h>
#define STEP(i) printf("Step %d\n", i);
#define CCE(errValue) \
do { \
if (errValue != hipSuccess) { \
fprintf(stderr ,"[CUDA-ERROR]-[%s(line:%d)]: %s\n", __FILE__, __LINE__, hipGetErrorString(errValue)); \
exit(0); \
} \
} while(0);
// CATCH_CUDA_ERR(hipMalloc(&dev_array, sizeof(int) * used_n));
// CATCH_CUDA_ERR(hipMemcpy(dev_array, array, sizeof(int) * used_n, hipMemcpyHostToDevice));
void copyMatrixFromHostToDevice(Matrix* hostMatrix, Matrix** deviceMatrix, double** deviceMatrixArray, int** deviceDimsArray) {
const int n = hostMatrix->n(), m = hostMatrix->m();
Matrix* temp = new Matrix(n, m);
delete[] temp->matrix;
delete[] temp->real_shape;
const int matrix_size = sizeof(double) * n * m;
CCE(hipMalloc(&temp->matrix, matrix_size));
CCE(hipMemcpy(temp->matrix, hostMatrix->matrix, matrix_size, hipMemcpyHostToDevice));
const int dims_size = sizeof(int) * 2;
CCE(hipMalloc(&temp->real_shape, dims_size));
CCE(hipMemcpy(temp->real_shape, hostMatrix->real_shape, dims_size, hipMemcpyHostToDevice));
CCE(hipMalloc(deviceMatrix, sizeof(Matrix) * 1));
CCE(hipMemcpy(*deviceMatrix, temp, sizeof(Matrix) * 1, hipMemcpyHostToDevice));
*deviceMatrixArray = temp->matrix;
*deviceDimsArray = temp->real_shape;
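// The device allocations now belong to the caller via deviceMatrixArray/deviceDimsArray, so the
// pointers are nulled before deleting the host-side shell (presumably so Matrix's destructor
// does not free them).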
temp->matrix = NULL;
temp->real_shape = NULL;
delete temp;
}
void copyMatrixFromDeviceToHost(double* deviceMatrixArray, Matrix** hostMatrix, int n, int m) {
*hostMatrix = new Matrix(n, m);
CCE(
hipMemcpy(
(*hostMatrix)->matrix,
deviceMatrixArray,
sizeof(double) * n * m,
hipMemcpyDeviceToHost
)
);
}
/*
QRDecomposition is part of SVD and should be called from host
Matrix* a - pointer to matrix on host
@return pair of Q and R matrix on host
*/
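// Column-by-column Gram-Schmidt: for column i, each already-orthonormalized column q_k is
// projected out of a_i (the coefficient becomes R(k, i)); R(i, i) is the norm of the remainder
// and Q(:, i) is that remainder normalized.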
pair<Matrix*, Matrix*> QRDecompositionNaive(Matrix *a) {
int n = a->n();
int m = a->m();
Matrix* Q = new Matrix(n, m);
Matrix* R = new Matrix(m, m);
for (int i = 0; i < m; i++) {
Matrix* ai = subMatrix(a, 0, n + 0, i + 0, i + 1);
for (int k = 0; k < i; k++) {
Matrix* qk = subMatrix(Q, 0, n + 0, k + 0, k + 1);
Matrix* qkt = transpose(qk);
Matrix* tempMultiply = multiply(qkt, ai);
double v = -1 * tempMultiply->get(0, 0);
Matrix* tmp = multiply(qk, v);
Matrix* temp_ai = sum(ai, tmp);
delete ai;
ai = temp_ai;
R->set(k, i, -1 * v);
delete qk;
delete qkt;
delete tmp;
delete tempMultiply;
}
R->set(i, i, vectorColLength(ai));
auto nai = vectorColNormalize(ai);
for (int k = 0; k < n; k++) {
double nk0 = nai->get(k, 0);
Q->set(k, i, nk0);
}
delete ai;
delete nai;
}
return make_pair(Q, R);
}
Matrix* multiply_wrapper(Matrix* a, Matrix* b) {
// Logic
// 1. copy matrixes to device
// 2. call multiply kernel
// 3. copy results to host
// 4. free allocated device memory
Matrix* ab = new Matrix(a->n(), b->m());
// part 1 start
Matrix *a_dev, *b_dev, *ab_dev;
double *a_arr, *b_arr, *ab_arr;
int *a_dim_arr, *b_dim_arr, *ab_dim_arr;
copyMatrixFromHostToDevice(a, &a_dev, &a_arr, &a_dim_arr);
copyMatrixFromHostToDevice(b, &b_dev, &b_arr, &b_dim_arr);
copyMatrixFromHostToDevice(ab, &ab_dev, &ab_arr, &ab_dim_arr);
delete ab;
// part 1 end
// part 2 start
// hipError_t cudaStat; // hipMalloc status
// hipblasHandle_t handle;
// hipblasStatus_t stat = hipblasCreate(&handle); // CUBLAS functions status
// double alpha = 1.0, beta = 0.0;
// hipblasDgemm(handle,
// HIPBLAS_OP_N, HIPBLAS_OP_N,
// a->n, b->m, b->n,
// &alpha,
// a_arr, a->n,
// b_arr, b->n,
// &beta,
// ab_arr, a->n
// );
// CCE(hipDeviceSynchronize());
// CCE(hipGetLastError())
hipLaunchKernelGGL(( multiply), dim3(128), dim3(32), 0, 0, a_dev, b_dev, ab_dev);
CCE(hipGetLastError())
// part 2 end
// part 3 start
copyMatrixFromDeviceToHost(ab_arr, &ab, a->n(), b->m());
// part 3 end
// part 4 start
CCE(hipFree(a_arr));
CCE(hipFree(a_dev));
CCE(hipFree(b_arr));
CCE(hipFree(b_dev));
CCE(hipFree(ab_arr));
CCE(hipFree(ab_dev));
CCE(hipFree(a_dim_arr));
CCE(hipFree(b_dim_arr));
CCE(hipFree(ab_dim_arr));
// part 4 end
return ab;
}
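// Rank-limited SVD via alternating (subspace/power) iteration: a QR of A*V refreshes U, a QR of
// A^T*U refreshes V and yields the rank x rank triangular factor used as Sigma, and the loop
// stops once ||A*V - U*Sigma|| falls below eps.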
Triple* SVDDecomposition(Matrix *a, int rank, double eps) {
int n = a->n();
int m = a->m();
auto u = randomMatrix(n, rank), sgm = randomMatrix(rank, rank), v = randomMatrix(m, rank);
auto at = transpose(a);
double err = 1e9;
for (; err > eps;) {
Matrix* av = multiply_wrapper(a, v);
// auto av = multiply(a, v);
// show(av, a->n, rank);
// show(av_test, a->n, rank);
// exit(0);
pair<Matrix*, Matrix*> qr_av = QRDecompositionNaive(av);
Matrix* u_tmp = subMatrix(qr_av.first, 0, n, 0, rank);
delete u;
u = u_tmp;
Matrix* atu = multiply_wrapper(at, u);
// auto atu = multiply(at, u);
pair<Matrix*, Matrix*> qr_atu = QRDecompositionNaive(atu);
Matrix* v_tmp = subMatrix(qr_atu.first, 0, m, 0, rank);
delete v;
v = v_tmp;
Matrix* sgm_tmp = subMatrix(qr_atu.second, 0, rank, 0, rank);
delete sgm;
sgm = sgm_tmp;
// find error e = || A*V - U*SGM||
// av = multiply(a, v);
auto usgm = multiply_wrapper(u, sgm);
// auto usgm = multiply(u, sgm);
double revert = -1;
Matrix* usgmt = multiply(usgm, revert);
Matrix* difff = sum(av, usgmt);
err = matrixNorm(difff);
// double av_diff = diff(av, av_test);
printf("Iteration ended, error=%f, diff=%f\n", err, 0.f);
delete av;
// delete av_test;
delete qr_av.first;
delete qr_av.second;
delete atu;
delete qr_atu.first;
delete qr_atu.second;
delete usgm;
delete usgmt;
delete difff;
}
delete at;
return new Triple(u, sgm, v);
}
// Only for rectangular (2-D) matrices
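// The dense gesvd routine used below only supports tall inputs (rows >= cols), hence the
// transpose when t->n() < t->m(); the factors are transposed back before returning.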
Triple* SVDDecompositionwCUB(Matrix *t) {
if (t->shape_length!= 2) {
printf("Cannot perform SVD for non rectangular matrix!");
exit(-1);
}
hipsolverDnHandle_t cusolverH; // cusolver handle
cusolverStatus_t cusolvstatus = hipsolverDnCreate(&cusolverH);
printf("CUSOLVE %d\n", cusolvstatus);
Matrix* a;
if (t->n() < t->m()) {
a = transpose(t);
} else {
a = t->copy();
}
int rows = a->n(), cols = a->m();
int rank = min(rows, cols);
double* a_arr;
CCE(hipMalloc(&a_arr, sizeof(double) * rows * cols));
CCE(hipMemcpy(a_arr, a->matrix, sizeof(double) * rows * cols, hipMemcpyHostToDevice));
// Device buffers for the U and VT factors (the singular values follow below)
double *U_arr, *VT_arr;
CCE(hipMalloc(&U_arr, sizeof(double) * rows * rank));
CCE(hipMalloc(&VT_arr, sizeof(double) * rank * cols));
// array for singular values
double* s_arr;
CCE(hipMalloc(&s_arr, sizeof(double) * rank));
CCE(hipGetLastError());
int lda = rows;
int ldu = rows;
int ldvt = cols;
int lwork = 0;
auto status = hipsolverDnDgesvd_bufferSize(cusolverH, rows, cols, &lwork);
printf("Buff size status %d, lwork: %d\n", status, lwork);
CCE(hipGetLastError());
double* work;
CCE(hipMalloc(&work, sizeof(double) * lwork));
double* d_rwork;
CCE(hipMalloc(&d_rwork, sizeof(double) * (rank - 1)));
int* info;
CCE(hipMalloc(&info, sizeof(int)));
cusolverStatus_t cusolver_status;
cusolver_status = hipsolverDnDgesvd(
cusolverH,
'S', 'S',
rows, cols,
a_arr, lda,
s_arr,
U_arr, ldu,
VT_arr, ldvt,
work, lwork,
NULL, info
);
CCE(hipGetLastError());
printf("Debug: rows=%d cols=%d, lda=%d ldu=%d ldvt=%d, lwork=%d, \n", rows, cols, lda, ldu, ldvt, lwork);
printf("Checks lda!<max(1,rows): %d\n", !(lda < max(1, rows)));
printf("Checks ldu!<max(1,rows): %d\n", !(ldu < max(1, rows)));
printf("Checks ldv!<max(1,cols): %d\n", !(ldvt < max(1, cols)));
printf("cuBLAS SVD result status: %d\n", cusolver_status);
// printf("%d %d %d %d\n", CUSOLVER_STATUS_SUCCESS, CUSOLVER_STATUS_NOT_INITIALIZED, CUSOLVER_STATUS_INVALID_VALUE, CUSOLVER_STATUS_ARCH_MISMATCH, CUSOLVER_STATUS_INTERNAL_ERROR);
double* s_cpu = new double[rank];
CCE(hipMemcpy(s_cpu, s_arr, sizeof(double) * rank, hipMemcpyDeviceToHost));
Matrix* S = new Matrix(rank, rank);
for (int i = 0; i < rank; i++) {
S->set(i, i, s_cpu[i]);
}
delete[] s_cpu;
Matrix* U = new Matrix(rows, rank);
CCE(hipMemcpy(U->matrix, U_arr, sizeof(double) * rows * rank, hipMemcpyDeviceToHost));
Matrix* VT = new Matrix(rank, cols);
CCE(hipMemcpy(VT->matrix, VT_arr, sizeof(double) * rank * cols, hipMemcpyDeviceToHost));
if (t->n() < t->m()) {
auto real_U = transpose(VT);
auto real_VT = transpose(U);
return new Triple(real_U, S, real_VT);
} else {
return new Triple(U, S, VT);
}
}
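// TT-SVD sweep: the tensor is flattened to an n_left x n_right matrix, SVD'd and truncated, the
// left factor becomes the next TT core (reshaped to r_prev x n_i x r_cur), and S*VT is carried
// forward as the matrix for the following mode; the remaining M becomes the last core.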
vector<Matrix*> TTDecomposition(Matrix* a, double eps) {
vector<Matrix *> res;
double norm = frobeniousNorm(a);
double threshold = norm * eps / sqrt(a->shape_length - 1);
int n_left = a->real_shape[0];
int n_right = 1;
for (int i = 1; i < a->shape_length; i++) {
n_right *= a->real_shape[i];
}
int shape[] = { n_left, n_right };
Matrix *M = a->copy();
M->reshape(shape, 2);
// U S VT
auto svd_m_full = SVDDecompositionwCUB(M);
auto svd_m = trunkSVDResultsForTT(svd_m_full, eps);
res.push_back(svd_m->first);
int r = svd_m->second->real_shape[0];
delete M;
M = multiply(svd_m->second, svd_m->third);
delete svd_m_full->first;
delete svd_m_full->second;
delete svd_m_full->third;
delete svd_m_full;
delete svd_m->second;
delete svd_m->third;
delete svd_m;
for (int i = 1; i < a->shape_length - 1; i++) {
n_left = a->real_shape[i];
n_right = n_right / a->real_shape[i];
int next_shape[] = {r * n_left, n_right};
M->reshape(next_shape, 2);
auto svd_m_next_full = SVDDecompositionwCUB(M);
auto svd_m_next = trunkSVDResultsForTT(svd_m_next_full, eps);
int r_cur = svd_m_next->second->real_shape[0];
Matrix *GK = svd_m_next->first;
int gk_shape[] = {r, n_left, r_cur};
GK->reshape(gk_shape, 3);
res.push_back(GK);
r = r_cur;
delete M;
M = multiply(svd_m_next->second, svd_m_next->third);
delete svd_m_next_full->first;
delete svd_m_next_full->second;
delete svd_m_next_full->third;
delete svd_m_next_full;
delete svd_m_next->second;
delete svd_m_next->third;
delete svd_m_next;
}
res.push_back(M);
return res;
}
// U S VT
Triple* trunkSVDResultsForTT(Triple* svd, double eps) {
int rank = svd->second->n();
double sum = 0;
for (int i = rank - 1; i >= 1; i--) {
double val = svd->second->get(i, i);
if (sum + val > eps) {
break;
}
sum += val;
rank--;
}
Matrix* U = subMatrix(svd->first, 0, svd->first->n(), 0, rank);
Matrix* S = subMatrix(svd->second, 0, rank, 0, rank);
Matrix* VT = subMatrix(svd->third, 0, rank, 0, svd->third->m());
return new Triple(U, S, VT);
}
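// Evaluates a single tensor entry from its TT cores: the indexed row of the first core is
// multiplied through the indexed slice of each middle core and finally by the indexed column of
// the last core; the resulting 1x1 product is the requested value.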
double getValueFromTrain(vector<Matrix *> m, vector<int> indexes) {
Matrix *first = subMatrix(m[0], indexes[0], indexes[0] + 1, 0, m[0]->m());
for (int i = 1; i < m.size() - 1; i++) {
Matrix *cur = m[i]->get2DshapeFrom3d(indexes[i]);
auto temp_first = multiply(first, cur);
delete first;
first = temp_first;
delete cur;
}
Matrix *last = subMatrix(m[m.size() - 1], 0, m[m.size() - 1]->n(), indexes[indexes.size() - 1],
indexes[indexes.size() - 1] + 1);
auto temp_first = multiply(first, last);
double res = temp_first->matrix[0];
delete first;
delete last;
delete temp_first;
return res;
} | 73c937b723e610d6a6ce50eac77d4a9044cb61d4.cu | #include "./svd.cuh"
#include <cublas_v2.h>
#include <cusolverDn.h>
#define STEP(i) printf("Step %d\n", i);
#define CCE(errValue) \
do { \
if (errValue != cudaSuccess) { \
fprintf(stderr ,"[CUDA-ERROR]-[%s(line:%d)]: %s\n", __FILE__, __LINE__, cudaGetErrorString(errValue)); \
exit(0); \
} \
} while(0);
// CATCH_CUDA_ERR(cudaMalloc(&dev_array, sizeof(int) * used_n));
// CATCH_CUDA_ERR(cudaMemcpy(dev_array, array, sizeof(int) * used_n, cudaMemcpyHostToDevice));
void copyMatrixFromHostToDevice(Matrix* hostMatrix, Matrix** deviceMatrix, double** deviceMatrixArray, int** deviceDimsArray) {
const int n = hostMatrix->n(), m = hostMatrix->m();
Matrix* temp = new Matrix(n, m);
delete[] temp->matrix;
delete[] temp->real_shape;
const int matrix_size = sizeof(double) * n * m;
CCE(cudaMalloc(&temp->matrix, matrix_size));
CCE(cudaMemcpy(temp->matrix, hostMatrix->matrix, matrix_size, cudaMemcpyHostToDevice));
const int dims_size = sizeof(int) * 2;
CCE(cudaMalloc(&temp->real_shape, dims_size));
CCE(cudaMemcpy(temp->real_shape, hostMatrix->real_shape, dims_size, cudaMemcpyHostToDevice));
CCE(cudaMalloc(deviceMatrix, sizeof(Matrix) * 1));
CCE(cudaMemcpy(*deviceMatrix, temp, sizeof(Matrix) * 1, cudaMemcpyHostToDevice));
*deviceMatrixArray = temp->matrix;
*deviceDimsArray = temp->real_shape;
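// The device allocations now belong to the caller via deviceMatrixArray/deviceDimsArray, so the
// pointers are nulled before deleting the host-side shell (presumably so Matrix's destructor
// does not free them).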
temp->matrix = NULL;
temp->real_shape = NULL;
delete temp;
}
void copyMatrixFromDeviceToHost(double* deviceMatrixArray, Matrix** hostMatrix, int n, int m) {
*hostMatrix = new Matrix(n, m);
CCE(
cudaMemcpy(
(*hostMatrix)->matrix,
deviceMatrixArray,
sizeof(double) * n * m,
cudaMemcpyDeviceToHost
)
);
}
/*
QRDecomposition is part of SVD and should be called from host
Matrix* a - pointer to matrix on host
@return pair of Q and R matrix on host
*/
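// Column-by-column Gram-Schmidt: for column i, each already-orthonormalized column q_k is
// projected out of a_i (the coefficient becomes R(k, i)); R(i, i) is the norm of the remainder
// and Q(:, i) is that remainder normalized.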
pair<Matrix*, Matrix*> QRDecompositionNaive(Matrix *a) {
int n = a->n();
int m = a->m();
Matrix* Q = new Matrix(n, m);
Matrix* R = new Matrix(m, m);
for (int i = 0; i < m; i++) {
Matrix* ai = subMatrix(a, 0, n + 0, i + 0, i + 1);
for (int k = 0; k < i; k++) {
Matrix* qk = subMatrix(Q, 0, n + 0, k + 0, k + 1);
Matrix* qkt = transpose(qk);
Matrix* tempMultiply = multiply(qkt, ai);
double v = -1 * tempMultiply->get(0, 0);
Matrix* tmp = multiply(qk, v);
Matrix* temp_ai = sum(ai, tmp);
delete ai;
ai = temp_ai;
R->set(k, i, -1 * v);
delete qk;
delete qkt;
delete tmp;
delete tempMultiply;
}
R->set(i, i, vectorColLength(ai));
auto nai = vectorColNormalize(ai);
for (int k = 0; k < n; k++) {
double nk0 = nai->get(k, 0);
Q->set(k, i, nk0);
}
delete ai;
delete nai;
}
return make_pair(Q, R);
}
Matrix* multiply_wrapper(Matrix* a, Matrix* b) {
// Logic
// 1. copy matrixes to device
// 2. call multiply kernel
// 3. copy results to host
// 4. free allocated device memory
Matrix* ab = new Matrix(a->n(), b->m());
// part 1 start
Matrix *a_dev, *b_dev, *ab_dev;
double *a_arr, *b_arr, *ab_arr;
int *a_dim_arr, *b_dim_arr, *ab_dim_arr;
copyMatrixFromHostToDevice(a, &a_dev, &a_arr, &a_dim_arr);
copyMatrixFromHostToDevice(b, &b_dev, &b_arr, &b_dim_arr);
copyMatrixFromHostToDevice(ab, &ab_dev, &ab_arr, &ab_dim_arr);
delete ab;
// part 1 end
// part 2 start
// cudaError_t cudaStat; // cudaMalloc status
// cublasHandle_t handle;
// cublasStatus_t stat = cublasCreate(&handle); // CUBLAS functions status
// double alpha = 1.0, beta = 0.0;
// cublasDgemm(handle,
// CUBLAS_OP_N, CUBLAS_OP_N,
// a->n, b->m, b->n,
// &alpha,
// a_arr, a->n,
// b_arr, b->n,
// &beta,
// ab_arr, a->n
// );
// CCE(cudaDeviceSynchronize());
// CCE(cudaGetLastError())
multiply<<<128, 32>>>(a_dev, b_dev, ab_dev);
CCE(cudaGetLastError())
// part 2 end
// part 3 start
copyMatrixFromDeviceToHost(ab_arr, &ab, a->n(), b->m());
// part 3 end
// part 4 start
CCE(cudaFree(a_arr));
CCE(cudaFree(a_dev));
CCE(cudaFree(b_arr));
CCE(cudaFree(b_dev));
CCE(cudaFree(ab_arr));
CCE(cudaFree(ab_dev));
CCE(cudaFree(a_dim_arr));
CCE(cudaFree(b_dim_arr));
CCE(cudaFree(ab_dim_arr));
// part 4 end
return ab;
}
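// Rank-limited SVD via alternating (subspace/power) iteration: a QR of A*V refreshes U, a QR of
// A^T*U refreshes V and yields the rank x rank triangular factor used as Sigma, and the loop
// stops once ||A*V - U*Sigma|| falls below eps.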
Triple* SVDDecomposition(Matrix *a, int rank, double eps) {
int n = a->n();
int m = a->m();
auto u = randomMatrix(n, rank), sgm = randomMatrix(rank, rank), v = randomMatrix(m, rank);
auto at = transpose(a);
double err = 1e9;
for (; err > eps;) {
Matrix* av = multiply_wrapper(a, v);
// auto av = multiply(a, v);
// show(av, a->n, rank);
// show(av_test, a->n, rank);
// exit(0);
pair<Matrix*, Matrix*> qr_av = QRDecompositionNaive(av);
Matrix* u_tmp = subMatrix(qr_av.first, 0, n, 0, rank);
delete u;
u = u_tmp;
Matrix* atu = multiply_wrapper(at, u);
// auto atu = multiply(at, u);
pair<Matrix*, Matrix*> qr_atu = QRDecompositionNaive(atu);
Matrix* v_tmp = subMatrix(qr_atu.first, 0, m, 0, rank);
delete v;
v = v_tmp;
Matrix* sgm_tmp = subMatrix(qr_atu.second, 0, rank, 0, rank);
delete sgm;
sgm = sgm_tmp;
// find error e = || A*V - U*SGM||
// av = multiply(a, v);
auto usgm = multiply_wrapper(u, sgm);
// auto usgm = multiply(u, sgm);
double revert = -1;
Matrix* usgmt = multiply(usgm, revert);
Matrix* difff = sum(av, usgmt);
err = matrixNorm(difff);
// double av_diff = diff(av, av_test);
printf("Iteration ended, error=%f, diff=%f\n", err, 0.f);
delete av;
// delete av_test;
delete qr_av.first;
delete qr_av.second;
delete atu;
delete qr_atu.first;
delete qr_atu.second;
delete usgm;
delete usgmt;
delete difff;
}
delete at;
return new Triple(u, sgm, v);
}
// Only for rectangular (2-D) matrices
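// The dense gesvd routine used below only supports tall inputs (rows >= cols), hence the
// transpose when t->n() < t->m(); the factors are transposed back before returning.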
Triple* SVDDecompositionwCUB(Matrix *t) {
if (t->shape_length!= 2) {
printf("Cannot perform SVD for non rectangular matrix!");
exit(-1);
}
cusolverDnHandle_t cusolverH; // cusolver handle
cusolverStatus_t cusolvstatus = cusolverDnCreate(&cusolverH);
printf("CUSOLVE %d\n", cusolvstatus);
Matrix* a;
if (t->n() < t->m()) {
a = transpose(t);
} else {
a = t->copy();
}
int rows = a->n(), cols = a->m();
int rank = min(rows, cols);
double* a_arr;
CCE(cudaMalloc(&a_arr, sizeof(double) * rows * cols));
CCE(cudaMemcpy(a_arr, a->matrix, sizeof(double) * rows * cols, cudaMemcpyHostToDevice));
// Device buffers for the U and VT factors (the singular values follow below)
double *U_arr, *VT_arr;
CCE(cudaMalloc(&U_arr, sizeof(double) * rows * rank));
CCE(cudaMalloc(&VT_arr, sizeof(double) * rank * cols));
// array for singular values
double* s_arr;
CCE(cudaMalloc(&s_arr, sizeof(double) * rank));
CCE(cudaGetLastError());
int lda = rows;
int ldu = rows;
int ldvt = cols;
int lwork = 0;
auto status = cusolverDnDgesvd_bufferSize(cusolverH, rows, cols, &lwork);
printf("Buff size status %d, lwork: %d\n", status, lwork);
CCE(cudaGetLastError());
double* work;
CCE(cudaMalloc(&work, sizeof(double) * lwork));
double* d_rwork;
CCE(cudaMalloc(&d_rwork, sizeof(double) * (rank - 1)));
int* info;
CCE(cudaMalloc(&info, sizeof(int)));
cusolverStatus_t cusolver_status;
cusolver_status = cusolverDnDgesvd(
cusolverH,
'S', 'S',
rows, cols,
a_arr, lda,
s_arr,
U_arr, ldu,
VT_arr, ldvt,
work, lwork,
NULL, info
);
CCE(cudaGetLastError());
printf("Debug: rows=%d cols=%d, lda=%d ldu=%d ldvt=%d, lwork=%d, \n", rows, cols, lda, ldu, ldvt, lwork);
printf("Checks lda!<max(1,rows): %d\n", !(lda < max(1, rows)));
printf("Checks ldu!<max(1,rows): %d\n", !(ldu < max(1, rows)));
printf("Checks ldv!<max(1,cols): %d\n", !(ldvt < max(1, cols)));
printf("cuBLAS SVD result status: %d\n", cusolver_status);
// printf("%d %d %d %d\n", CUSOLVER_STATUS_SUCCESS, CUSOLVER_STATUS_NOT_INITIALIZED, CUSOLVER_STATUS_INVALID_VALUE, CUSOLVER_STATUS_ARCH_MISMATCH, CUSOLVER_STATUS_INTERNAL_ERROR);
double* s_cpu = new double[rank];
CCE(cudaMemcpy(s_cpu, s_arr, sizeof(double) * rank, cudaMemcpyDeviceToHost));
Matrix* S = new Matrix(rank, rank);
for (int i = 0; i < rank; i++) {
S->set(i, i, s_cpu[i]);
}
delete[] s_cpu;
Matrix* U = new Matrix(rows, rank);
CCE(cudaMemcpy(U->matrix, U_arr, sizeof(double) * rows * rank, cudaMemcpyDeviceToHost));
Matrix* VT = new Matrix(rank, cols);
CCE(cudaMemcpy(VT->matrix, VT_arr, sizeof(double) * rank * cols, cudaMemcpyDeviceToHost));
if (t->n() < t->m()) {
auto real_U = transpose(VT);
auto real_VT = transpose(U);
return new Triple(real_U, S, real_VT);
} else {
return new Triple(U, S, VT);
}
}
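// TT-SVD sweep: the tensor is flattened to an n_left x n_right matrix, SVD'd and truncated, the
// left factor becomes the next TT core (reshaped to r_prev x n_i x r_cur), and S*VT is carried
// forward as the matrix for the following mode; the remaining M becomes the last core.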
vector<Matrix*> TTDecomposition(Matrix* a, double eps) {
vector<Matrix *> res;
double norm = frobeniousNorm(a);
double threshold = norm * eps / sqrt(a->shape_length - 1);
int n_left = a->real_shape[0];
int n_right = 1;
for (int i = 1; i < a->shape_length; i++) {
n_right *= a->real_shape[i];
}
int shape[] = { n_left, n_right };
Matrix *M = a->copy();
M->reshape(shape, 2);
// U S VT
auto svd_m_full = SVDDecompositionwCUB(M);
auto svd_m = trunkSVDResultsForTT(svd_m_full, eps);
res.push_back(svd_m->first);
int r = svd_m->second->real_shape[0];
delete M;
M = multiply(svd_m->second, svd_m->third);
delete svd_m_full->first;
delete svd_m_full->second;
delete svd_m_full->third;
delete svd_m_full;
delete svd_m->second;
delete svd_m->third;
delete svd_m;
for (int i = 1; i < a->shape_length - 1; i++) {
n_left = a->real_shape[i];
n_right = n_right / a->real_shape[i];
int next_shape[] = {r * n_left, n_right};
M->reshape(next_shape, 2);
auto svd_m_next_full = SVDDecompositionwCUB(M);
auto svd_m_next = trunkSVDResultsForTT(svd_m_next_full, eps);
int r_cur = svd_m_next->second->real_shape[0];
Matrix *GK = svd_m_next->first;
int gk_shape[] = {r, n_left, r_cur};
GK->reshape(gk_shape, 3);
res.push_back(GK);
r = r_cur;
delete M;
M = multiply(svd_m_next->second, svd_m_next->third);
delete svd_m_next_full->first;
delete svd_m_next_full->second;
delete svd_m_next_full->third;
delete svd_m_next_full;
delete svd_m_next->second;
delete svd_m_next->third;
delete svd_m_next;
}
res.push_back(M);
return res;
}
// U S VT
Triple* trunkSVDResultsForTT(Triple* svd, double eps) {
int rank = svd->second->n();
double sum = 0;
for (int i = rank - 1; i >= 1; i--) {
double val = svd->second->get(i, i);
if (sum + val > eps) {
break;
}
sum += val;
rank--;
}
Matrix* U = subMatrix(svd->first, 0, svd->first->n(), 0, rank);
Matrix* S = subMatrix(svd->second, 0, rank, 0, rank);
Matrix* VT = subMatrix(svd->third, 0, rank, 0, svd->third->m());
return new Triple(U, S, VT);
}
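// Evaluates a single tensor entry from its TT cores: the indexed row of the first core is
// multiplied through the indexed slice of each middle core and finally by the indexed column of
// the last core; the resulting 1x1 product is the requested value.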
double getValueFromTrain(vector<Matrix *> m, vector<int> indexes) {
Matrix *first = subMatrix(m[0], indexes[0], indexes[0] + 1, 0, m[0]->m());
for (int i = 1; i < m.size() - 1; i++) {
Matrix *cur = m[i]->get2DshapeFrom3d(indexes[i]);
auto temp_first = multiply(first, cur);
delete first;
first = temp_first;
delete cur;
}
Matrix *last = subMatrix(m[m.size() - 1], 0, m[m.size() - 1]->n(), indexes[indexes.size() - 1],
indexes[indexes.size() - 1] + 1);
auto temp_first = multiply(first, last);
double res = temp_first->matrix[0];
delete first;
delete last;
delete temp_first;
return res;
} |